Divy Le Ray4d22de32007-01-18 22:04:14 -05001/*
Divy Le Raya02d44a2008-10-13 18:47:30 -07002 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
Divy Le Ray4d22de32007-01-18 22:04:14 -05003 *
Divy Le Ray1d68e932007-01-30 19:44:35 -08004 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
Divy Le Ray4d22de32007-01-18 22:04:14 -05009 *
Divy Le Ray1d68e932007-01-30 19:44:35 -080010 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
Divy Le Ray4d22de32007-01-18 22:04:14 -050031 */
Divy Le Ray4d22de32007-01-18 22:04:14 -050032#include <linux/skbuff.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/dma-mapping.h>
Karen Xiea109a5b2008-12-18 22:56:20 -080039#include <net/arp.h>
Divy Le Ray4d22de32007-01-18 22:04:14 -050040#include "common.h"
41#include "regs.h"
42#include "sge_defs.h"
43#include "t3_cpl.h"
44#include "firmware_exports.h"
45
46#define USE_GTS 0
47
48#define SGE_RX_SM_BUF_SIZE 1536
Divy Le Raye0994eb2007-02-24 16:44:17 -080049
Divy Le Ray4d22de32007-01-18 22:04:14 -050050#define SGE_RX_COPY_THRES 256
Divy Le Raycf992af2007-05-30 21:10:47 -070051#define SGE_RX_PULL_LEN 128
Divy Le Ray4d22de32007-01-18 22:04:14 -050052
Divy Le Raye0994eb2007-02-24 16:44:17 -080053/*
Divy Le Raycf992af2007-05-30 21:10:47 -070054 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
55 * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs
56 * directly.
Divy Le Raye0994eb2007-02-24 16:44:17 -080057 */
Divy Le Raycf992af2007-05-30 21:10:47 -070058#define FL0_PG_CHUNK_SIZE 2048
Divy Le Ray7385ecf2008-05-21 18:56:21 -070059#define FL0_PG_ORDER 0
60#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
61#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
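/*
 * Illustrative sizing (assuming 4KB pages): FL0 carves each order-0 page
 * into two 2KB chunks, while FL1 allocates order-1 (8KB) pages used as a
 * single 8KB chunk.  With larger pages (e.g. 64KB) FL1 instead carves
 * 16KB chunks out of order-0 pages.
 */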
Divy Le Raycf992af2007-05-30 21:10:47 -070062
Divy Le Raye0994eb2007-02-24 16:44:17 -080063#define SGE_RX_DROP_THRES 16
Divy Le Ray42c8ea12009-03-12 21:14:04 +000064#define RX_RECLAIM_PERIOD (HZ/4)
Divy Le Ray4d22de32007-01-18 22:04:14 -050065
66/*
Divy Le Ray26b38712009-03-12 21:13:43 +000067 * Max number of Rx buffers we replenish at a time.
68 */
69#define MAX_RX_REFILL 16U
70/*
Divy Le Ray4d22de32007-01-18 22:04:14 -050071 * Period of the Tx buffer reclaim timer. This timer does not need to run
72 * frequently as Tx buffers are usually reclaimed by new Tx packets.
73 */
74#define TX_RECLAIM_PERIOD (HZ / 4)
Divy Le Ray42c8ea12009-03-12 21:14:04 +000075#define TX_RECLAIM_TIMER_CHUNK 64U
76#define TX_RECLAIM_CHUNK 16U
Divy Le Ray4d22de32007-01-18 22:04:14 -050077
78/* WR size in bytes */
79#define WR_LEN (WR_FLITS * 8)
80
81/*
82 * Types of Tx queues in each queue set. Order here matters, do not change.
83 */
84enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
85
86/* Values for sge_txq.flags */
87enum {
88 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
89 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
90};
91
92struct tx_desc {
Al Virofb8e4442007-08-23 03:04:12 -040093 __be64 flit[TX_DESC_FLITS];
Divy Le Ray4d22de32007-01-18 22:04:14 -050094};
95
96struct rx_desc {
97 __be32 addr_lo;
98 __be32 len_gen;
99 __be32 gen2;
100 __be32 addr_hi;
101};
102
103struct tx_sw_desc { /* SW state per Tx descriptor */
104 struct sk_buff *skb;
Divy Le Ray23561c92007-11-16 11:22:05 -0800105 u8 eop; /* set if last descriptor for packet */
106 u8 addr_idx; /* buffer index of first SGL entry in descriptor */
107 u8 fragidx; /* first page fragment associated with descriptor */
108 s8 sflit; /* start flit of first SGL entry in descriptor */
Divy Le Ray4d22de32007-01-18 22:04:14 -0500109};
110
Divy Le Raycf992af2007-05-30 21:10:47 -0700111struct rx_sw_desc { /* SW state per Rx descriptor */
Divy Le Raye0994eb2007-02-24 16:44:17 -0800112 union {
113 struct sk_buff *skb;
Divy Le Raycf992af2007-05-30 21:10:47 -0700114 struct fl_pg_chunk pg_chunk;
115 };
116 DECLARE_PCI_UNMAP_ADDR(dma_addr);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500117};
118
119struct rsp_desc { /* response queue descriptor */
120 struct rss_header rss_hdr;
121 __be32 flags;
122 __be32 len_cq;
123 u8 imm_data[47];
124 u8 intr_gen;
125};
126
Divy Le Ray4d22de32007-01-18 22:04:14 -0500127/*
Divy Le Ray99d7cf32007-02-24 16:44:06 -0800128 * Holds unmapping information for Tx packets that need deferred unmapping.
129 * This structure lives at skb->head and must be allocated by callers.
130 */
131struct deferred_unmap_info {
132 struct pci_dev *pdev;
133 dma_addr_t addr[MAX_SKB_FRAGS + 1];
134};
135
136/*
Divy Le Ray4d22de32007-01-18 22:04:14 -0500137 * Maps a number of flits to the number of Tx descriptors that can hold them.
138 * The formula is
139 *
140 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
141 *
142 * HW allows up to 4 descriptors to be combined into a WR.
143 */
144static u8 flit_desc_map[] = {
145 0,
146#if SGE_NUM_GENBITS == 1
147 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
148 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
149 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
150 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
151#elif SGE_NUM_GENBITS == 2
152 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
153 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
154 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
155 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
156#else
157# error "SGE_NUM_GENBITS must be 1 or 2"
158#endif
159};
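/*
 * Worked example (a sketch, assuming SGE_NUM_GENBITS == 2 and therefore
 * WR_FLITS == 15): a 16-flit request needs 1 + (16 - 2) / (15 - 1) = 2
 * descriptors, matching flit_desc_map[16] == 2 in the table above.
 */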
160
161static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
162{
163 return container_of(q, struct sge_qset, fl[qidx]);
164}
165
166static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
167{
168 return container_of(q, struct sge_qset, rspq);
169}
170
171static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
172{
173 return container_of(q, struct sge_qset, txq[qidx]);
174}
175
176/**
177 * refill_rspq - replenish an SGE response queue
178 * @adapter: the adapter
179 * @q: the response queue to replenish
180 * @credits: how many new responses to make available
181 *
182 * Replenishes a response queue by making the supplied number of responses
183 * available to HW.
184 */
185static inline void refill_rspq(struct adapter *adapter,
186 const struct sge_rspq *q, unsigned int credits)
187{
Divy Le Rayafefce62007-11-16 11:22:21 -0800188 rmb();
Divy Le Ray4d22de32007-01-18 22:04:14 -0500189 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
190 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
191}
192
193/**
194 * need_skb_unmap - does the platform need unmapping of sk_buffs?
195 *
 196 * Returns true if the platform needs sk_buff unmapping. Since the result
 197 * is a compile-time constant, unnecessary unmapping code is optimized away.
198 */
199static inline int need_skb_unmap(void)
200{
201 /*
 202 * This structure is used to tell if the platform needs buffer
203 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
204 */
205 struct dummy {
206 DECLARE_PCI_UNMAP_ADDR(addr);
207 };
208
209 return sizeof(struct dummy) != 0;
210}
211
212/**
213 * unmap_skb - unmap a packet main body and its page fragments
214 * @skb: the packet
215 * @q: the Tx queue containing Tx descriptors for the packet
216 * @cidx: index of Tx descriptor
217 * @pdev: the PCI device
218 *
219 * Unmap the main body of an sk_buff and its page fragments, if any.
220 * Because of the fairly complicated structure of our SGLs and the desire
Divy Le Ray23561c92007-11-16 11:22:05 -0800221 * to conserve space for metadata, the information necessary to unmap an
222 * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
223 * descriptors (the physical addresses of the various data buffers), and
224 * the SW descriptor state (assorted indices). The send functions
225 * initialize the indices for the first packet descriptor so we can unmap
226 * the buffers held in the first Tx descriptor here, and we have enough
227 * information at this point to set the state for the next Tx descriptor.
228 *
229 * Note that it is possible to clean up the first descriptor of a packet
230 * before the send routines have written the next descriptors, but this
231 * race does not cause any problem. We just end up writing the unmapping
232 * info for the descriptor first.
Divy Le Ray4d22de32007-01-18 22:04:14 -0500233 */
234static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
235 unsigned int cidx, struct pci_dev *pdev)
236{
237 const struct sg_ent *sgp;
Divy Le Ray23561c92007-11-16 11:22:05 -0800238 struct tx_sw_desc *d = &q->sdesc[cidx];
239 int nfrags, frag_idx, curflit, j = d->addr_idx;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500240
Divy Le Ray23561c92007-11-16 11:22:05 -0800241 sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
242 frag_idx = d->fragidx;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500243
Divy Le Ray23561c92007-11-16 11:22:05 -0800244 if (frag_idx == 0 && skb_headlen(skb)) {
245 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
246 skb_headlen(skb), PCI_DMA_TODEVICE);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500247 j = 1;
248 }
249
Divy Le Ray23561c92007-11-16 11:22:05 -0800250 curflit = d->sflit + 1 + j;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500251 nfrags = skb_shinfo(skb)->nr_frags;
252
253 while (frag_idx < nfrags && curflit < WR_FLITS) {
254 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
255 skb_shinfo(skb)->frags[frag_idx].size,
256 PCI_DMA_TODEVICE);
257 j ^= 1;
258 if (j == 0) {
259 sgp++;
260 curflit++;
261 }
262 curflit++;
263 frag_idx++;
264 }
265
Divy Le Ray23561c92007-11-16 11:22:05 -0800266 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
267 d = cidx + 1 == q->size ? q->sdesc : d + 1;
268 d->fragidx = frag_idx;
269 d->addr_idx = j;
270 d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
Divy Le Ray4d22de32007-01-18 22:04:14 -0500271 }
272}
273
274/**
275 * free_tx_desc - reclaims Tx descriptors and their buffers
276 * @adapter: the adapter
277 * @q: the Tx queue to reclaim descriptors from
278 * @n: the number of descriptors to reclaim
279 *
280 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
281 * Tx buffers. Called with the Tx queue lock held.
282 */
283static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
284 unsigned int n)
285{
286 struct tx_sw_desc *d;
287 struct pci_dev *pdev = adapter->pdev;
288 unsigned int cidx = q->cidx;
289
Divy Le Ray99d7cf32007-02-24 16:44:06 -0800290 const int need_unmap = need_skb_unmap() &&
291 q->cntxt_id >= FW_TUNNEL_SGEEC_START;
292
Divy Le Ray4d22de32007-01-18 22:04:14 -0500293 d = &q->sdesc[cidx];
294 while (n--) {
295 if (d->skb) { /* an SGL is present */
Divy Le Ray99d7cf32007-02-24 16:44:06 -0800296 if (need_unmap)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500297 unmap_skb(d->skb, q, cidx, pdev);
Divy Le Ray23561c92007-11-16 11:22:05 -0800298 if (d->eop)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500299 kfree_skb(d->skb);
300 }
301 ++d;
302 if (++cidx == q->size) {
303 cidx = 0;
304 d = q->sdesc;
305 }
306 }
307 q->cidx = cidx;
308}
309
310/**
311 * reclaim_completed_tx - reclaims completed Tx descriptors
312 * @adapter: the adapter
313 * @q: the Tx queue to reclaim completed descriptors from
Divy Le Ray42c8ea12009-03-12 21:14:04 +0000314 * @chunk: maximum number of descriptors to reclaim
Divy Le Ray4d22de32007-01-18 22:04:14 -0500315 *
316 * Reclaims Tx descriptors that the SGE has indicated it has processed,
317 * and frees the associated buffers if possible. Called with the Tx
318 * queue's lock held.
319 */
Divy Le Ray42c8ea12009-03-12 21:14:04 +0000320static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
321 struct sge_txq *q,
322 unsigned int chunk)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500323{
324 unsigned int reclaim = q->processed - q->cleaned;
325
Divy Le Ray42c8ea12009-03-12 21:14:04 +0000326 reclaim = min(chunk, reclaim);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500327 if (reclaim) {
328 free_tx_desc(adapter, q, reclaim);
329 q->cleaned += reclaim;
330 q->in_use -= reclaim;
331 }
Divy Le Ray42c8ea12009-03-12 21:14:04 +0000332 return q->processed - q->cleaned;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500333}
334
335/**
336 * should_restart_tx - are there enough resources to restart a Tx queue?
337 * @q: the Tx queue
338 *
339 * Checks if there are enough descriptors to restart a suspended Tx queue.
340 */
341static inline int should_restart_tx(const struct sge_txq *q)
342{
343 unsigned int r = q->processed - q->cleaned;
344
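	/* Restart only if, after pending reclaims, under half the ring is in use. */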
345 return q->in_use - r < (q->size >> 1);
346}
347
Divy Le Ray9bb2b312009-03-12 21:13:49 +0000348static void clear_rx_desc(const struct sge_fl *q, struct rx_sw_desc *d)
349{
350 if (q->use_pages) {
351 if (d->pg_chunk.page)
352 put_page(d->pg_chunk.page);
353 d->pg_chunk.page = NULL;
354 } else {
355 kfree_skb(d->skb);
356 d->skb = NULL;
357 }
358}
359
Divy Le Ray4d22de32007-01-18 22:04:14 -0500360/**
361 * free_rx_bufs - free the Rx buffers on an SGE free list
362 * @pdev: the PCI device associated with the adapter
363 * @rxq: the SGE free list to clean up
364 *
365 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
366 * this queue should be stopped before calling this function.
367 */
368static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
369{
370 unsigned int cidx = q->cidx;
371
372 while (q->credits--) {
373 struct rx_sw_desc *d = &q->sdesc[cidx];
374
375 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
376 q->buf_size, PCI_DMA_FROMDEVICE);
Divy Le Ray9bb2b312009-03-12 21:13:49 +0000377 clear_rx_desc(q, d);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500378 if (++cidx == q->size)
379 cidx = 0;
380 }
Divy Le Raye0994eb2007-02-24 16:44:17 -0800381
Divy Le Raycf992af2007-05-30 21:10:47 -0700382 if (q->pg_chunk.page) {
Divy Le Ray7385ecf2008-05-21 18:56:21 -0700383 __free_pages(q->pg_chunk.page, q->order);
Divy Le Raycf992af2007-05-30 21:10:47 -0700384 q->pg_chunk.page = NULL;
385 }
Divy Le Ray4d22de32007-01-18 22:04:14 -0500386}
387
388/**
389 * add_one_rx_buf - add a packet buffer to a free-buffer list
Divy Le Raycf992af2007-05-30 21:10:47 -0700390 * @va: buffer start VA
Divy Le Ray4d22de32007-01-18 22:04:14 -0500391 * @len: the buffer length
392 * @d: the HW Rx descriptor to write
393 * @sd: the SW Rx descriptor to write
394 * @gen: the generation bit value
395 * @pdev: the PCI device associated with the adapter
396 *
397 * Add a buffer of the given length to the supplied HW and SW Rx
398 * descriptors.
399 */
Divy Le Rayb1fb1f22008-05-21 18:56:16 -0700400static inline int add_one_rx_buf(void *va, unsigned int len,
401 struct rx_desc *d, struct rx_sw_desc *sd,
402 unsigned int gen, struct pci_dev *pdev)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500403{
404 dma_addr_t mapping;
405
Divy Le Raye0994eb2007-02-24 16:44:17 -0800406 mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -0700407 if (unlikely(pci_dma_mapping_error(pdev, mapping)))
Divy Le Rayb1fb1f22008-05-21 18:56:16 -0700408 return -ENOMEM;
409
Divy Le Ray4d22de32007-01-18 22:04:14 -0500410 pci_unmap_addr_set(sd, dma_addr, mapping);
411
412 d->addr_lo = cpu_to_be32(mapping);
413 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
414 wmb();
415 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
416 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
Divy Le Rayb1fb1f22008-05-21 18:56:16 -0700417 return 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500418}
419
Divy Le Ray7385ecf2008-05-21 18:56:21 -0700420static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp,
421 unsigned int order)
Divy Le Raycf992af2007-05-30 21:10:47 -0700422{
423 if (!q->pg_chunk.page) {
Divy Le Ray7385ecf2008-05-21 18:56:21 -0700424 q->pg_chunk.page = alloc_pages(gfp, order);
Divy Le Raycf992af2007-05-30 21:10:47 -0700425 if (unlikely(!q->pg_chunk.page))
426 return -ENOMEM;
427 q->pg_chunk.va = page_address(q->pg_chunk.page);
428 q->pg_chunk.offset = 0;
429 }
430 sd->pg_chunk = q->pg_chunk;
431
432 q->pg_chunk.offset += q->buf_size;
Divy Le Ray7385ecf2008-05-21 18:56:21 -0700433 if (q->pg_chunk.offset == (PAGE_SIZE << order))
Divy Le Raycf992af2007-05-30 21:10:47 -0700434 q->pg_chunk.page = NULL;
435 else {
436 q->pg_chunk.va += q->buf_size;
437 get_page(q->pg_chunk.page);
438 }
439 return 0;
440}
441
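/*
 * Notify HW of newly added free-list buffers.  The doorbell write is
 * batched: it is only issued once the pending credits reach a quarter of
 * the buffers currently on the list, limiting MMIO traffic.
 */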
Divy Le Ray26b38712009-03-12 21:13:43 +0000442static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
443{
444 if (q->pend_cred >= q->credits / 4) {
445 q->pend_cred = 0;
446 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
447 }
448}
449
Divy Le Ray4d22de32007-01-18 22:04:14 -0500450/**
451 * refill_fl - refill an SGE free-buffer list
452 * @adapter: the adapter
453 * @q: the free-list to refill
454 * @n: the number of new buffers to allocate
455 * @gfp: the gfp flags for allocating new buffers
456 *
457 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
 458 * allocated with the supplied gfp flags. The caller must ensure that
459 * @n does not exceed the queue's capacity.
460 */
Divy Le Rayb1fb1f22008-05-21 18:56:16 -0700461static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500462{
Divy Le Raycf992af2007-05-30 21:10:47 -0700463 void *buf_start;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500464 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
465 struct rx_desc *d = &q->desc[q->pidx];
Divy Le Rayb1fb1f22008-05-21 18:56:16 -0700466 unsigned int count = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500467
468 while (n--) {
Divy Le Rayb1fb1f22008-05-21 18:56:16 -0700469 int err;
470
Divy Le Raycf992af2007-05-30 21:10:47 -0700471 if (q->use_pages) {
Divy Le Ray7385ecf2008-05-21 18:56:21 -0700472 if (unlikely(alloc_pg_chunk(q, sd, gfp, q->order))) {
Divy Le Raycf992af2007-05-30 21:10:47 -0700473nomem: q->alloc_failed++;
Divy Le Raye0994eb2007-02-24 16:44:17 -0800474 break;
475 }
Divy Le Raycf992af2007-05-30 21:10:47 -0700476 buf_start = sd->pg_chunk.va;
Divy Le Raye0994eb2007-02-24 16:44:17 -0800477 } else {
Divy Le Raycf992af2007-05-30 21:10:47 -0700478 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
Divy Le Raye0994eb2007-02-24 16:44:17 -0800479
Divy Le Raycf992af2007-05-30 21:10:47 -0700480 if (!skb)
481 goto nomem;
Divy Le Raye0994eb2007-02-24 16:44:17 -0800482
Divy Le Raycf992af2007-05-30 21:10:47 -0700483 sd->skb = skb;
484 buf_start = skb->data;
Divy Le Raye0994eb2007-02-24 16:44:17 -0800485 }
486
Divy Le Rayb1fb1f22008-05-21 18:56:16 -0700487 err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
488 adap->pdev);
489 if (unlikely(err)) {
Divy Le Ray9bb2b312009-03-12 21:13:49 +0000490 clear_rx_desc(q, sd);
Divy Le Rayb1fb1f22008-05-21 18:56:16 -0700491 break;
492 }
493
Divy Le Ray4d22de32007-01-18 22:04:14 -0500494 d++;
495 sd++;
496 if (++q->pidx == q->size) {
497 q->pidx = 0;
498 q->gen ^= 1;
499 sd = q->sdesc;
500 d = q->desc;
501 }
Divy Le Rayb1fb1f22008-05-21 18:56:16 -0700502 count++;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500503 }
Divy Le Ray26b38712009-03-12 21:13:43 +0000504
505 q->credits += count;
506 q->pend_cred += count;
507 ring_fl_db(adap, q);
Divy Le Rayb1fb1f22008-05-21 18:56:16 -0700508
509 return count;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500510}
511
512static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
513{
Divy Le Ray26b38712009-03-12 21:13:43 +0000514 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
Divy Le Ray7385ecf2008-05-21 18:56:21 -0700515 GFP_ATOMIC | __GFP_COMP);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500516}
517
518/**
519 * recycle_rx_buf - recycle a receive buffer
520 * @adapter: the adapter
521 * @q: the SGE free list
522 * @idx: index of buffer to recycle
523 *
524 * Recycles the specified buffer on the given free list by adding it at
525 * the next available slot on the list.
526 */
527static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
528 unsigned int idx)
529{
530 struct rx_desc *from = &q->desc[idx];
531 struct rx_desc *to = &q->desc[q->pidx];
532
Divy Le Raycf992af2007-05-30 21:10:47 -0700533 q->sdesc[q->pidx] = q->sdesc[idx];
Divy Le Ray4d22de32007-01-18 22:04:14 -0500534 to->addr_lo = from->addr_lo; /* already big endian */
535 to->addr_hi = from->addr_hi; /* likewise */
536 wmb();
537 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
538 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
Divy Le Ray4d22de32007-01-18 22:04:14 -0500539
540 if (++q->pidx == q->size) {
541 q->pidx = 0;
542 q->gen ^= 1;
543 }
Divy Le Ray26b38712009-03-12 21:13:43 +0000544
545 q->credits++;
546 q->pend_cred++;
547 ring_fl_db(adap, q);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500548}
549
550/**
551 * alloc_ring - allocate resources for an SGE descriptor ring
552 * @pdev: the PCI device
553 * @nelem: the number of descriptors
554 * @elem_size: the size of each descriptor
555 * @sw_size: the size of the SW state associated with each ring element
556 * @phys: the physical address of the allocated ring
557 * @metadata: address of the array holding the SW state for the ring
558 *
559 * Allocates resources for an SGE descriptor ring, such as Tx queues,
560 * free buffer lists, or response queues. Each SGE ring requires
561 * space for its HW descriptors plus, optionally, space for the SW state
562 * associated with each HW entry (the metadata). The function returns
563 * three values: the virtual address for the HW ring (the return value
564 * of the function), the physical address of the HW ring, and the address
565 * of the SW ring.
566 */
567static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
Divy Le Raye0994eb2007-02-24 16:44:17 -0800568 size_t sw_size, dma_addr_t * phys, void *metadata)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500569{
570 size_t len = nelem * elem_size;
571 void *s = NULL;
572 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
573
574 if (!p)
575 return NULL;
Divy Le Ray52565542008-11-26 15:35:59 -0800576 if (sw_size && metadata) {
Divy Le Ray4d22de32007-01-18 22:04:14 -0500577 s = kcalloc(nelem, sw_size, GFP_KERNEL);
578
579 if (!s) {
580 dma_free_coherent(&pdev->dev, len, p, *phys);
581 return NULL;
582 }
Divy Le Ray4d22de32007-01-18 22:04:14 -0500583 *(void **)metadata = s;
Divy Le Ray52565542008-11-26 15:35:59 -0800584 }
Divy Le Ray4d22de32007-01-18 22:04:14 -0500585 memset(p, 0, len);
586 return p;
587}
588
589/**
Divy Le Ray204e2f92008-05-06 19:26:01 -0700590 * t3_reset_qset - reset a sge qset
591 * @q: the queue set
592 *
593 * Reset the qset structure.
 594 * The NAPI structure is preserved in the event of
595 * the qset's reincarnation, for example during EEH recovery.
596 */
597static void t3_reset_qset(struct sge_qset *q)
598{
599 if (q->adap &&
600 !(q->adap->flags & NAPI_INIT)) {
601 memset(q, 0, sizeof(*q));
602 return;
603 }
604
605 q->adap = NULL;
606 memset(&q->rspq, 0, sizeof(q->rspq));
607 memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
608 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
609 q->txq_stopped = 0;
Divy Le Ray20d3fc12008-10-08 17:36:03 -0700610 q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
Divy Le Ray42c8ea12009-03-12 21:14:04 +0000611 q->rx_reclaim_timer.function = NULL;
Herbert Xu7be2df42009-01-21 14:39:13 -0800612 q->lro_frag_tbl.nr_frags = q->lro_frag_tbl.len = 0;
Divy Le Ray204e2f92008-05-06 19:26:01 -0700613}
614
615
616/**
Divy Le Ray4d22de32007-01-18 22:04:14 -0500617 * free_qset - free the resources of an SGE queue set
618 * @adapter: the adapter owning the queue set
619 * @q: the queue set
620 *
621 * Release the HW and SW resources associated with an SGE queue set, such
622 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
623 * queue set must be quiesced prior to calling this.
624 */
Stephen Hemminger9265fab2007-10-08 16:22:29 -0700625static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500626{
627 int i;
628 struct pci_dev *pdev = adapter->pdev;
629
Divy Le Ray4d22de32007-01-18 22:04:14 -0500630 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
631 if (q->fl[i].desc) {
Roland Dreierb1186de2008-03-20 13:30:48 -0700632 spin_lock_irq(&adapter->sge.reg_lock);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500633 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
Roland Dreierb1186de2008-03-20 13:30:48 -0700634 spin_unlock_irq(&adapter->sge.reg_lock);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500635 free_rx_bufs(pdev, &q->fl[i]);
636 kfree(q->fl[i].sdesc);
637 dma_free_coherent(&pdev->dev,
638 q->fl[i].size *
639 sizeof(struct rx_desc), q->fl[i].desc,
640 q->fl[i].phys_addr);
641 }
642
643 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
644 if (q->txq[i].desc) {
Roland Dreierb1186de2008-03-20 13:30:48 -0700645 spin_lock_irq(&adapter->sge.reg_lock);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500646 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
Roland Dreierb1186de2008-03-20 13:30:48 -0700647 spin_unlock_irq(&adapter->sge.reg_lock);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500648 if (q->txq[i].sdesc) {
649 free_tx_desc(adapter, &q->txq[i],
650 q->txq[i].in_use);
651 kfree(q->txq[i].sdesc);
652 }
653 dma_free_coherent(&pdev->dev,
654 q->txq[i].size *
655 sizeof(struct tx_desc),
656 q->txq[i].desc, q->txq[i].phys_addr);
657 __skb_queue_purge(&q->txq[i].sendq);
658 }
659
660 if (q->rspq.desc) {
Roland Dreierb1186de2008-03-20 13:30:48 -0700661 spin_lock_irq(&adapter->sge.reg_lock);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500662 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
Roland Dreierb1186de2008-03-20 13:30:48 -0700663 spin_unlock_irq(&adapter->sge.reg_lock);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500664 dma_free_coherent(&pdev->dev,
665 q->rspq.size * sizeof(struct rsp_desc),
666 q->rspq.desc, q->rspq.phys_addr);
667 }
668
Divy Le Ray204e2f92008-05-06 19:26:01 -0700669 t3_reset_qset(q);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500670}
671
672/**
673 * init_qset_cntxt - initialize an SGE queue set context info
674 * @qs: the queue set
675 * @id: the queue set id
676 *
677 * Initializes the TIDs and context ids for the queues of a queue set.
678 */
679static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
680{
681 qs->rspq.cntxt_id = id;
682 qs->fl[0].cntxt_id = 2 * id;
683 qs->fl[1].cntxt_id = 2 * id + 1;
684 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
685 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
686 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
687 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
688 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
689}
690
691/**
692 * sgl_len - calculates the size of an SGL of the given capacity
693 * @n: the number of SGL entries
694 *
695 * Calculates the number of flits needed for a scatter/gather list that
696 * can hold the given number of entries.
697 */
698static inline unsigned int sgl_len(unsigned int n)
699{
700 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
701 return (3 * n) / 2 + (n & 1);
702}
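/*
 * Example: each SGL entry carries an 8-byte address and a 4-byte length,
 * packed two entries per three flits (see make_sgl() below), so
 * sgl_len(3) = (3 * 3) / 2 + 1 = 5 flits.
 */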
703
704/**
705 * flits_to_desc - returns the num of Tx descriptors for the given flits
706 * @n: the number of flits
707 *
708 * Calculates the number of Tx descriptors needed for the supplied number
709 * of flits.
710 */
711static inline unsigned int flits_to_desc(unsigned int n)
712{
713 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
714 return flit_desc_map[n];
715}
716
717/**
Divy Le Raycf992af2007-05-30 21:10:47 -0700718 * get_packet - return the next ingress packet buffer from a free list
719 * @adap: the adapter that received the packet
720 * @fl: the SGE free list holding the packet
721 * @len: the packet length including any SGE padding
722 * @drop_thres: # of remaining buffers before we start dropping packets
723 *
724 * Get the next packet from a free list and complete setup of the
725 * sk_buff. If the packet is small we make a copy and recycle the
726 * original buffer, otherwise we use the original buffer itself. If a
727 * positive drop threshold is supplied packets are dropped and their
728 * buffers recycled if (a) the number of remaining buffers is under the
729 * threshold and the packet is too big to copy, or (b) the packet should
730 * be copied but there is no memory for the copy.
731 */
732static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
733 unsigned int len, unsigned int drop_thres)
734{
735 struct sk_buff *skb = NULL;
736 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
737
738 prefetch(sd->skb->data);
739 fl->credits--;
740
741 if (len <= SGE_RX_COPY_THRES) {
742 skb = alloc_skb(len, GFP_ATOMIC);
743 if (likely(skb != NULL)) {
744 __skb_put(skb, len);
745 pci_dma_sync_single_for_cpu(adap->pdev,
746 pci_unmap_addr(sd, dma_addr), len,
747 PCI_DMA_FROMDEVICE);
748 memcpy(skb->data, sd->skb->data, len);
749 pci_dma_sync_single_for_device(adap->pdev,
750 pci_unmap_addr(sd, dma_addr), len,
751 PCI_DMA_FROMDEVICE);
752 } else if (!drop_thres)
753 goto use_orig_buf;
754recycle:
755 recycle_rx_buf(adap, fl, fl->cidx);
756 return skb;
757 }
758
Divy Le Ray26b38712009-03-12 21:13:43 +0000759 if (unlikely(fl->credits < drop_thres) &&
760 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
761 GFP_ATOMIC | __GFP_COMP) == 0)
Divy Le Raycf992af2007-05-30 21:10:47 -0700762 goto recycle;
763
764use_orig_buf:
765 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
766 fl->buf_size, PCI_DMA_FROMDEVICE);
767 skb = sd->skb;
768 skb_put(skb, len);
769 __refill_fl(adap, fl);
770 return skb;
771}
772
773/**
774 * get_packet_pg - return the next ingress packet buffer from a free list
775 * @adap: the adapter that received the packet
776 * @fl: the SGE free list holding the packet
777 * @len: the packet length including any SGE padding
778 * @drop_thres: # of remaining buffers before we start dropping packets
779 *
780 * Get the next packet from a free list populated with page chunks.
781 * If the packet is small we make a copy and recycle the original buffer,
782 * otherwise we attach the original buffer as a page fragment to a fresh
783 * sk_buff. If a positive drop threshold is supplied packets are dropped
784 * and their buffers recycled if (a) the number of remaining buffers is
785 * under the threshold and the packet is too big to copy, or (b) there's
786 * no system memory.
787 *
788 * Note: this function is similar to @get_packet but deals with Rx buffers
789 * that are page chunks rather than sk_buffs.
790 */
791static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
Divy Le Ray7385ecf2008-05-21 18:56:21 -0700792 struct sge_rspq *q, unsigned int len,
793 unsigned int drop_thres)
Divy Le Raycf992af2007-05-30 21:10:47 -0700794{
Divy Le Ray7385ecf2008-05-21 18:56:21 -0700795 struct sk_buff *newskb, *skb;
Divy Le Raycf992af2007-05-30 21:10:47 -0700796 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
797
Divy Le Ray7385ecf2008-05-21 18:56:21 -0700798 newskb = skb = q->pg_skb;
799
800 if (!skb && (len <= SGE_RX_COPY_THRES)) {
801 newskb = alloc_skb(len, GFP_ATOMIC);
802 if (likely(newskb != NULL)) {
803 __skb_put(newskb, len);
Divy Le Raycf992af2007-05-30 21:10:47 -0700804 pci_dma_sync_single_for_cpu(adap->pdev,
805 pci_unmap_addr(sd, dma_addr), len,
806 PCI_DMA_FROMDEVICE);
Divy Le Ray7385ecf2008-05-21 18:56:21 -0700807 memcpy(newskb->data, sd->pg_chunk.va, len);
Divy Le Raycf992af2007-05-30 21:10:47 -0700808 pci_dma_sync_single_for_device(adap->pdev,
809 pci_unmap_addr(sd, dma_addr), len,
810 PCI_DMA_FROMDEVICE);
811 } else if (!drop_thres)
812 return NULL;
813recycle:
814 fl->credits--;
815 recycle_rx_buf(adap, fl, fl->cidx);
Divy Le Ray7385ecf2008-05-21 18:56:21 -0700816 q->rx_recycle_buf++;
817 return newskb;
Divy Le Raycf992af2007-05-30 21:10:47 -0700818 }
819
Divy Le Ray7385ecf2008-05-21 18:56:21 -0700820 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
Divy Le Raycf992af2007-05-30 21:10:47 -0700821 goto recycle;
822
Divy Le Ray7385ecf2008-05-21 18:56:21 -0700823 if (!skb)
Divy Le Rayb47385b2008-05-21 18:56:26 -0700824 newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
Divy Le Ray7385ecf2008-05-21 18:56:21 -0700825 if (unlikely(!newskb)) {
Divy Le Raycf992af2007-05-30 21:10:47 -0700826 if (!drop_thres)
827 return NULL;
828 goto recycle;
829 }
830
831 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
832 fl->buf_size, PCI_DMA_FROMDEVICE);
Divy Le Ray7385ecf2008-05-21 18:56:21 -0700833 if (!skb) {
834 __skb_put(newskb, SGE_RX_PULL_LEN);
835 memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
836 skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
837 sd->pg_chunk.offset + SGE_RX_PULL_LEN,
838 len - SGE_RX_PULL_LEN);
839 newskb->len = len;
840 newskb->data_len = len - SGE_RX_PULL_LEN;
Divy Le Ray8f435802009-03-12 21:13:54 +0000841 newskb->truesize += newskb->data_len;
Divy Le Ray7385ecf2008-05-21 18:56:21 -0700842 } else {
843 skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
844 sd->pg_chunk.page,
845 sd->pg_chunk.offset, len);
846 newskb->len += len;
847 newskb->data_len += len;
Divy Le Ray8f435802009-03-12 21:13:54 +0000848 newskb->truesize += len;
Divy Le Ray7385ecf2008-05-21 18:56:21 -0700849 }
Divy Le Raycf992af2007-05-30 21:10:47 -0700850
851 fl->credits--;
852 /*
853 * We do not refill FLs here, we let the caller do it to overlap a
854 * prefetch.
855 */
Divy Le Ray7385ecf2008-05-21 18:56:21 -0700856 return newskb;
Divy Le Raycf992af2007-05-30 21:10:47 -0700857}
858
859/**
Divy Le Ray4d22de32007-01-18 22:04:14 -0500860 * get_imm_packet - return the next ingress packet buffer from a response
861 * @resp: the response descriptor containing the packet data
862 *
863 * Return a packet containing the immediate data of the given response.
864 */
865static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
866{
867 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
868
869 if (skb) {
870 __skb_put(skb, IMMED_PKT_SIZE);
Arnaldo Carvalho de Melo27d7ff42007-03-31 11:55:19 -0300871 skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500872 }
873 return skb;
874}
875
876/**
877 * calc_tx_descs - calculate the number of Tx descriptors for a packet
878 * @skb: the packet
879 *
880 * Returns the number of Tx descriptors needed for the given Ethernet
 881 * packet. Ethernet packets require the addition of WR and CPL headers.
882 */
883static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
884{
885 unsigned int flits;
886
887 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
888 return 1;
889
890 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
891 if (skb_shinfo(skb)->gso_size)
892 flits++;
893 return flits_to_desc(flits);
894}
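/*
 * Illustrative example: a TSO packet with 3 page fragments needs
 * sgl_len(3 + 1) + 2 + 1 = 9 flits, which flits_to_desc() maps to a
 * single Tx descriptor.
 */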
895
896/**
897 * make_sgl - populate a scatter/gather list for a packet
898 * @skb: the packet
899 * @sgp: the SGL to populate
900 * @start: start address of skb main body data to include in the SGL
901 * @len: length of skb main body data to include in the SGL
902 * @pdev: the PCI device
903 *
904 * Generates a scatter/gather list for the buffers that make up a packet
905 * and returns the SGL size in 8-byte words. The caller must size the SGL
906 * appropriately.
907 */
908static inline unsigned int make_sgl(const struct sk_buff *skb,
909 struct sg_ent *sgp, unsigned char *start,
910 unsigned int len, struct pci_dev *pdev)
911{
912 dma_addr_t mapping;
913 unsigned int i, j = 0, nfrags;
914
915 if (len) {
916 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
917 sgp->len[0] = cpu_to_be32(len);
918 sgp->addr[0] = cpu_to_be64(mapping);
919 j = 1;
920 }
921
922 nfrags = skb_shinfo(skb)->nr_frags;
923 for (i = 0; i < nfrags; i++) {
924 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
925
926 mapping = pci_map_page(pdev, frag->page, frag->page_offset,
927 frag->size, PCI_DMA_TODEVICE);
928 sgp->len[j] = cpu_to_be32(frag->size);
929 sgp->addr[j] = cpu_to_be64(mapping);
930 j ^= 1;
931 if (j == 0)
932 ++sgp;
933 }
934 if (j)
935 sgp->len[j] = 0;
936 return ((nfrags + (len != 0)) * 3) / 2 + j;
937}
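/*
 * Example: a packet with a non-empty linear body and two page fragments
 * yields three SGL entries, i.e. ((2 + 1) * 3) / 2 + 1 = 5 flits; the
 * final +1 is j, left at 1 because the last sg_ent is only half filled.
 */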
938
939/**
940 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
941 * @adap: the adapter
942 * @q: the Tx queue
943 *
 944 * Ring the doorbell if a Tx queue is asleep. There is a natural race
 945 * where the HW may go to sleep just after we check; in that case the
 946 * interrupt handler will detect the outstanding TX packet and ring the
 947 * doorbell for us.
948 *
949 * When GTS is disabled we unconditionally ring the doorbell.
950 */
951static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
952{
953#if USE_GTS
954 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
955 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
956 set_bit(TXQ_LAST_PKT_DB, &q->flags);
957 t3_write_reg(adap, A_SG_KDOORBELL,
958 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
959 }
960#else
961 wmb(); /* write descriptors before telling HW */
962 t3_write_reg(adap, A_SG_KDOORBELL,
963 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
964#endif
965}
966
967static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
968{
969#if SGE_NUM_GENBITS == 2
970 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
971#endif
972}
973
974/**
975 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
976 * @ndesc: number of Tx descriptors spanned by the SGL
977 * @skb: the packet corresponding to the WR
978 * @d: first Tx descriptor to be written
979 * @pidx: index of above descriptors
980 * @q: the SGE Tx queue
981 * @sgl: the SGL
982 * @flits: number of flits to the start of the SGL in the first descriptor
983 * @sgl_flits: the SGL size in flits
984 * @gen: the Tx descriptor generation
985 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
986 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
987 *
988 * Write a work request header and an associated SGL. If the SGL is
989 * small enough to fit into one Tx descriptor it has already been written
990 * and we just need to write the WR header. Otherwise we distribute the
991 * SGL across the number of descriptors it spans.
992 */
993static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
994 struct tx_desc *d, unsigned int pidx,
995 const struct sge_txq *q,
996 const struct sg_ent *sgl,
997 unsigned int flits, unsigned int sgl_flits,
Al Virofb8e4442007-08-23 03:04:12 -0400998 unsigned int gen, __be32 wr_hi,
999 __be32 wr_lo)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001000{
1001 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
1002 struct tx_sw_desc *sd = &q->sdesc[pidx];
1003
1004 sd->skb = skb;
1005 if (need_skb_unmap()) {
Divy Le Ray23561c92007-11-16 11:22:05 -08001006 sd->fragidx = 0;
1007 sd->addr_idx = 0;
1008 sd->sflit = flits;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001009 }
1010
1011 if (likely(ndesc == 1)) {
Divy Le Ray23561c92007-11-16 11:22:05 -08001012 sd->eop = 1;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001013 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1014 V_WR_SGLSFLT(flits)) | wr_hi;
1015 wmb();
1016 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
1017 V_WR_GEN(gen)) | wr_lo;
1018 wr_gen2(d, gen);
1019 } else {
1020 unsigned int ogen = gen;
1021 const u64 *fp = (const u64 *)sgl;
1022 struct work_request_hdr *wp = wrp;
1023
1024 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1025 V_WR_SGLSFLT(flits)) | wr_hi;
1026
1027 while (sgl_flits) {
1028 unsigned int avail = WR_FLITS - flits;
1029
1030 if (avail > sgl_flits)
1031 avail = sgl_flits;
1032 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
1033 sgl_flits -= avail;
1034 ndesc--;
1035 if (!sgl_flits)
1036 break;
1037
1038 fp += avail;
1039 d++;
Divy Le Ray23561c92007-11-16 11:22:05 -08001040 sd->eop = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001041 sd++;
1042 if (++pidx == q->size) {
1043 pidx = 0;
1044 gen ^= 1;
1045 d = q->desc;
1046 sd = q->sdesc;
1047 }
1048
1049 sd->skb = skb;
1050 wrp = (struct work_request_hdr *)d;
1051 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
1052 V_WR_SGLSFLT(1)) | wr_hi;
1053 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
1054 sgl_flits + 1)) |
1055 V_WR_GEN(gen)) | wr_lo;
1056 wr_gen2(d, gen);
1057 flits = 1;
1058 }
Divy Le Ray23561c92007-11-16 11:22:05 -08001059 sd->eop = 1;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001060 wrp->wr_hi |= htonl(F_WR_EOP);
1061 wmb();
1062 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1063 wr_gen2((struct tx_desc *)wp, ogen);
1064 WARN_ON(ndesc != 0);
1065 }
1066}
1067
1068/**
1069 * write_tx_pkt_wr - write a TX_PKT work request
1070 * @adap: the adapter
1071 * @skb: the packet to send
1072 * @pi: the egress interface
1073 * @pidx: index of the first Tx descriptor to write
1074 * @gen: the generation value to use
1075 * @q: the Tx queue
1076 * @ndesc: number of descriptors the packet will occupy
1077 * @compl: the value of the COMPL bit to use
1078 *
1079 * Generate a TX_PKT work request to send the supplied packet.
1080 */
1081static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1082 const struct port_info *pi,
1083 unsigned int pidx, unsigned int gen,
1084 struct sge_txq *q, unsigned int ndesc,
1085 unsigned int compl)
1086{
1087 unsigned int flits, sgl_flits, cntrl, tso_info;
1088 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1089 struct tx_desc *d = &q->desc[pidx];
1090 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
1091
1092 cpl->len = htonl(skb->len | 0x80000000);
1093 cntrl = V_TXPKT_INTF(pi->port_id);
1094
1095 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1096 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
1097
1098 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
1099 if (tso_info) {
1100 int eth_type;
1101 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
1102
1103 d->flit[2] = 0;
1104 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1105 hdr->cntrl = htonl(cntrl);
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03001106 eth_type = skb_network_offset(skb) == ETH_HLEN ?
Divy Le Ray4d22de32007-01-18 22:04:14 -05001107 CPL_ETH_II : CPL_ETH_II_VLAN;
1108 tso_info |= V_LSO_ETH_TYPE(eth_type) |
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001109 V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07001110 V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001111 hdr->lso_info = htonl(tso_info);
1112 flits = 3;
1113 } else {
1114 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1115 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
1116 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
1117 cpl->cntrl = htonl(cntrl);
1118
1119 if (skb->len <= WR_LEN - sizeof(*cpl)) {
1120 q->sdesc[pidx].skb = NULL;
1121 if (!skb->data_len)
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03001122 skb_copy_from_linear_data(skb, &d->flit[2],
1123 skb->len);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001124 else
1125 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
1126
1127 flits = (skb->len + 7) / 8 + 2;
1128 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
1129 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
1130 | F_WR_SOP | F_WR_EOP | compl);
1131 wmb();
1132 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
1133 V_WR_TID(q->token));
1134 wr_gen2(d, gen);
1135 kfree_skb(skb);
1136 return;
1137 }
1138
1139 flits = 2;
1140 }
1141
1142 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1143 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001144
1145 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
1146 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
1147 htonl(V_WR_TID(q->token)));
1148}
1149
Divy Le Ray82ad3322008-12-16 01:09:39 -08001150static inline void t3_stop_tx_queue(struct netdev_queue *txq,
1151 struct sge_qset *qs, struct sge_txq *q)
Krishna Kumara8cc21f2008-01-30 12:30:16 +05301152{
Divy Le Ray82ad3322008-12-16 01:09:39 -08001153 netif_tx_stop_queue(txq);
Krishna Kumara8cc21f2008-01-30 12:30:16 +05301154 set_bit(TXQ_ETH, &qs->txq_stopped);
1155 q->stops++;
1156}
1157
Divy Le Ray4d22de32007-01-18 22:04:14 -05001158/**
1159 * eth_xmit - add a packet to the Ethernet Tx queue
1160 * @skb: the packet
1161 * @dev: the egress net device
1162 *
1163 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
1164 */
1165int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1166{
Divy Le Ray82ad3322008-12-16 01:09:39 -08001167 int qidx;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001168 unsigned int ndesc, pidx, credits, gen, compl;
1169 const struct port_info *pi = netdev_priv(dev);
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001170 struct adapter *adap = pi->adapter;
Divy Le Ray82ad3322008-12-16 01:09:39 -08001171 struct netdev_queue *txq;
1172 struct sge_qset *qs;
1173 struct sge_txq *q;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001174
1175 /*
1176 * The chip min packet length is 9 octets but play safe and reject
1177 * anything shorter than an Ethernet header.
1178 */
1179 if (unlikely(skb->len < ETH_HLEN)) {
1180 dev_kfree_skb(skb);
1181 return NETDEV_TX_OK;
1182 }
1183
Divy Le Ray82ad3322008-12-16 01:09:39 -08001184 qidx = skb_get_queue_mapping(skb);
1185 qs = &pi->qs[qidx];
1186 q = &qs->txq[TXQ_ETH];
1187 txq = netdev_get_tx_queue(dev, qidx);
1188
Divy Le Ray4d22de32007-01-18 22:04:14 -05001189 spin_lock(&q->lock);
Divy Le Ray42c8ea12009-03-12 21:14:04 +00001190 reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001191
1192 credits = q->size - q->in_use;
1193 ndesc = calc_tx_descs(skb);
1194
1195 if (unlikely(credits < ndesc)) {
Divy Le Ray82ad3322008-12-16 01:09:39 -08001196 t3_stop_tx_queue(txq, qs, q);
Krishna Kumara8cc21f2008-01-30 12:30:16 +05301197 dev_err(&adap->pdev->dev,
1198 "%s: Tx ring %u full while queue awake!\n",
1199 dev->name, q->cntxt_id & 7);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001200 spin_unlock(&q->lock);
1201 return NETDEV_TX_BUSY;
1202 }
1203
1204 q->in_use += ndesc;
Divy Le Raycd7e9032008-03-13 00:13:30 -07001205 if (unlikely(credits - ndesc < q->stop_thres)) {
Divy Le Ray82ad3322008-12-16 01:09:39 -08001206 t3_stop_tx_queue(txq, qs, q);
Divy Le Raycd7e9032008-03-13 00:13:30 -07001207
1208 if (should_restart_tx(q) &&
1209 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1210 q->restarts++;
Divy Le Ray82ad3322008-12-16 01:09:39 -08001211 netif_tx_wake_queue(txq);
Divy Le Raycd7e9032008-03-13 00:13:30 -07001212 }
1213 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05001214
1215 gen = q->gen;
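	/*
	 * Request a WR completion roughly once every 8 descriptors (the & 8
	 * test fires as the running unacked count crosses 8), so the SGE
	 * reports Tx progress periodically rather than per packet.
	 */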
1216 q->unacked += ndesc;
1217 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
1218 q->unacked &= 7;
1219 pidx = q->pidx;
1220 q->pidx += ndesc;
1221 if (q->pidx >= q->size) {
1222 q->pidx -= q->size;
1223 q->gen ^= 1;
1224 }
1225
1226 /* update port statistics */
1227 if (skb->ip_summed == CHECKSUM_COMPLETE)
1228 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1229 if (skb_shinfo(skb)->gso_size)
1230 qs->port_stats[SGE_PSTAT_TSO]++;
1231 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1232 qs->port_stats[SGE_PSTAT_VLANINS]++;
1233
1234 dev->trans_start = jiffies;
1235 spin_unlock(&q->lock);
1236
1237 /*
1238 * We do not use Tx completion interrupts to free DMAd Tx packets.
 1239 * This is good for performance but means that we rely on new Tx
1240 * packets arriving to run the destructors of completed packets,
1241 * which open up space in their sockets' send queues. Sometimes
1242 * we do not get such new packets causing Tx to stall. A single
1243 * UDP transmitter is a good example of this situation. We have
1244 * a clean up timer that periodically reclaims completed packets
1245 * but it doesn't run often enough (nor do we want it to) to prevent
1246 * lengthy stalls. A solution to this problem is to run the
1247 * destructor early, after the packet is queued but before it's DMAd.
 1248 * A drawback is that we lie to socket memory accounting, but the amount
1249 * of extra memory is reasonable (limited by the number of Tx
1250 * descriptors), the packets do actually get freed quickly by new
1251 * packets almost always, and for protocols like TCP that wait for
1252 * acks to really free up the data the extra memory is even less.
1253 * On the positive side we run the destructors on the sending CPU
1254 * rather than on a potentially different completing CPU, usually a
1255 * good thing. We also run them without holding our Tx queue lock,
1256 * unlike what reclaim_completed_tx() would otherwise do.
1257 *
1258 * Run the destructor before telling the DMA engine about the packet
1259 * to make sure it doesn't complete and get freed prematurely.
1260 */
1261 if (likely(!skb_shared(skb)))
1262 skb_orphan(skb);
1263
1264 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1265 check_ring_tx_db(adap, q);
1266 return NETDEV_TX_OK;
1267}
1268
1269/**
1270 * write_imm - write a packet into a Tx descriptor as immediate data
1271 * @d: the Tx descriptor to write
1272 * @skb: the packet
1273 * @len: the length of packet data to write as immediate data
1274 * @gen: the generation bit value to write
1275 *
1276 * Writes a packet as immediate data into a Tx descriptor. The packet
1277 * contains a work request at its beginning. We must write the packet
Divy Le Ray27186dc2007-08-21 20:49:15 -07001278 * carefully so the SGE doesn't read it accidentally before it's written
1279 * in its entirety.
Divy Le Ray4d22de32007-01-18 22:04:14 -05001280 */
1281static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1282 unsigned int len, unsigned int gen)
1283{
1284 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1285 struct work_request_hdr *to = (struct work_request_hdr *)d;
1286
Divy Le Ray27186dc2007-08-21 20:49:15 -07001287 if (likely(!skb->data_len))
1288 memcpy(&to[1], &from[1], len - sizeof(*from));
1289 else
1290 skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
1291
Divy Le Ray4d22de32007-01-18 22:04:14 -05001292 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1293 V_WR_BCNTLFLT(len & 7));
1294 wmb();
1295 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1296 V_WR_LEN((len + 7) / 8));
1297 wr_gen2(d, gen);
1298 kfree_skb(skb);
1299}
1300
1301/**
1302 * check_desc_avail - check descriptor availability on a send queue
1303 * @adap: the adapter
1304 * @q: the send queue
1305 * @skb: the packet needing the descriptors
1306 * @ndesc: the number of Tx descriptors needed
1307 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1308 *
1309 * Checks if the requested number of Tx descriptors is available on an
1310 * SGE send queue. If the queue is already suspended or not enough
1311 * descriptors are available the packet is queued for later transmission.
1312 * Must be called with the Tx queue locked.
1313 *
1314 * Returns 0 if enough descriptors are available, 1 if there aren't
1315 * enough descriptors and the packet has been queued, and 2 if the caller
1316 * needs to retry because there weren't enough descriptors at the
 1317 * beginning of the call but some freed up in the meantime.
1318 */
1319static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1320 struct sk_buff *skb, unsigned int ndesc,
1321 unsigned int qid)
1322{
1323 if (unlikely(!skb_queue_empty(&q->sendq))) {
1324 addq_exit:__skb_queue_tail(&q->sendq, skb);
1325 return 1;
1326 }
1327 if (unlikely(q->size - q->in_use < ndesc)) {
1328 struct sge_qset *qs = txq_to_qset(q, qid);
1329
1330 set_bit(qid, &qs->txq_stopped);
1331 smp_mb__after_clear_bit();
1332
1333 if (should_restart_tx(q) &&
1334 test_and_clear_bit(qid, &qs->txq_stopped))
1335 return 2;
1336
1337 q->stops++;
1338 goto addq_exit;
1339 }
1340 return 0;
1341}
1342
1343/**
1344 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1345 * @q: the SGE control Tx queue
1346 *
1347 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1348 * that send only immediate data (presently just the control queues) and
1349 * thus do not have any sk_buffs to release.
1350 */
1351static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1352{
1353 unsigned int reclaim = q->processed - q->cleaned;
1354
1355 q->in_use -= reclaim;
1356 q->cleaned += reclaim;
1357}
1358
1359static inline int immediate(const struct sk_buff *skb)
1360{
Divy Le Ray27186dc2007-08-21 20:49:15 -07001361 return skb->len <= WR_LEN;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001362}
1363
1364/**
1365 * ctrl_xmit - send a packet through an SGE control Tx queue
1366 * @adap: the adapter
1367 * @q: the control queue
1368 * @skb: the packet
1369 *
1370 * Send a packet through an SGE control Tx queue. Packets sent through
1371 * a control queue must fit entirely as immediate data in a single Tx
1372 * descriptor and have no page fragments.
1373 */
1374static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1375 struct sk_buff *skb)
1376{
1377 int ret;
1378 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1379
1380 if (unlikely(!immediate(skb))) {
1381 WARN_ON(1);
1382 dev_kfree_skb(skb);
1383 return NET_XMIT_SUCCESS;
1384 }
1385
1386 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1387 wrp->wr_lo = htonl(V_WR_TID(q->token));
1388
1389 spin_lock(&q->lock);
1390 again:reclaim_completed_tx_imm(q);
1391
1392 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1393 if (unlikely(ret)) {
1394 if (ret == 1) {
1395 spin_unlock(&q->lock);
1396 return NET_XMIT_CN;
1397 }
1398 goto again;
1399 }
1400
1401 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1402
1403 q->in_use++;
1404 if (++q->pidx >= q->size) {
1405 q->pidx = 0;
1406 q->gen ^= 1;
1407 }
1408 spin_unlock(&q->lock);
1409 wmb();
1410 t3_write_reg(adap, A_SG_KDOORBELL,
1411 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1412 return NET_XMIT_SUCCESS;
1413}
1414
1415/**
1416 * restart_ctrlq - restart a suspended control queue
 1417 * @qs: the queue set containing the control queue
1418 *
1419 * Resumes transmission on a suspended Tx control queue.
1420 */
1421static void restart_ctrlq(unsigned long data)
1422{
1423 struct sk_buff *skb;
1424 struct sge_qset *qs = (struct sge_qset *)data;
1425 struct sge_txq *q = &qs->txq[TXQ_CTRL];
Divy Le Ray4d22de32007-01-18 22:04:14 -05001426
1427 spin_lock(&q->lock);
1428 again:reclaim_completed_tx_imm(q);
1429
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001430 while (q->in_use < q->size &&
1431 (skb = __skb_dequeue(&q->sendq)) != NULL) {
Divy Le Ray4d22de32007-01-18 22:04:14 -05001432
1433 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1434
1435 if (++q->pidx >= q->size) {
1436 q->pidx = 0;
1437 q->gen ^= 1;
1438 }
1439 q->in_use++;
1440 }
1441
1442 if (!skb_queue_empty(&q->sendq)) {
1443 set_bit(TXQ_CTRL, &qs->txq_stopped);
1444 smp_mb__after_clear_bit();
1445
1446 if (should_restart_tx(q) &&
1447 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1448 goto again;
1449 q->stops++;
1450 }
1451
1452 spin_unlock(&q->lock);
Divy Le Rayafefce62007-11-16 11:22:21 -08001453 wmb();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001454 t3_write_reg(qs->adap, A_SG_KDOORBELL,
Divy Le Ray4d22de32007-01-18 22:04:14 -05001455 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1456}
1457
Divy Le Ray14ab9892007-01-30 19:43:50 -08001458/*
1459 * Send a management message through control queue 0
1460 */
1461int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1462{
Divy Le Ray204e2f92008-05-06 19:26:01 -07001463 int ret;
Divy Le Raybc4b6b522007-12-17 18:47:41 -08001464 local_bh_disable();
1465 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1466 local_bh_enable();
1467
1468 return ret;
Divy Le Ray14ab9892007-01-30 19:43:50 -08001469}
1470
Divy Le Ray4d22de32007-01-18 22:04:14 -05001471/**
Divy Le Ray99d7cf32007-02-24 16:44:06 -08001472 * deferred_unmap_destructor - unmap a packet when it is freed
1473 * @skb: the packet
1474 *
1475 * This is the packet destructor used for Tx packets that need to remain
1476 * mapped until they are freed rather than until their Tx descriptors are
1477 * freed.
1478 */
1479static void deferred_unmap_destructor(struct sk_buff *skb)
1480{
1481 int i;
1482 const dma_addr_t *p;
1483 const struct skb_shared_info *si;
1484 const struct deferred_unmap_info *dui;
Divy Le Ray99d7cf32007-02-24 16:44:06 -08001485
1486 dui = (struct deferred_unmap_info *)skb->head;
1487 p = dui->addr;
1488
Divy Le Ray23561c92007-11-16 11:22:05 -08001489 if (skb->tail - skb->transport_header)
1490 pci_unmap_single(dui->pdev, *p++,
1491 skb->tail - skb->transport_header,
1492 PCI_DMA_TODEVICE);
Divy Le Ray99d7cf32007-02-24 16:44:06 -08001493
1494 si = skb_shinfo(skb);
1495 for (i = 0; i < si->nr_frags; i++)
1496 pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
1497 PCI_DMA_TODEVICE);
1498}
1499
1500static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1501 const struct sg_ent *sgl, int sgl_flits)
1502{
1503 dma_addr_t *p;
1504 struct deferred_unmap_info *dui;
1505
1506 dui = (struct deferred_unmap_info *)skb->head;
1507 dui->pdev = pdev;
1508 for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1509 *p++ = be64_to_cpu(sgl->addr[0]);
1510 *p++ = be64_to_cpu(sgl->addr[1]);
1511 }
1512 if (sgl_flits)
1513 *p = be64_to_cpu(sgl->addr[0]);
1514}
1515
1516/**
Divy Le Ray4d22de32007-01-18 22:04:14 -05001517 * write_ofld_wr - write an offload work request
1518 * @adap: the adapter
1519 * @skb: the packet to send
1520 * @q: the Tx queue
1521 * @pidx: index of the first Tx descriptor to write
1522 * @gen: the generation value to use
1523 * @ndesc: number of descriptors the packet will occupy
1524 *
1525 * Write an offload work request to send the supplied packet. The packet
1526 * data already carry the work request with most fields populated.
1527 */
1528static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1529 struct sge_txq *q, unsigned int pidx,
1530 unsigned int gen, unsigned int ndesc)
1531{
1532 unsigned int sgl_flits, flits;
1533 struct work_request_hdr *from;
1534 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1535 struct tx_desc *d = &q->desc[pidx];
1536
1537 if (immediate(skb)) {
1538 q->sdesc[pidx].skb = NULL;
1539 write_imm(d, skb, skb->len, gen);
1540 return;
1541 }
1542
1543 /* Only TX_DATA builds SGLs */
1544
1545 from = (struct work_request_hdr *)skb->data;
Arnaldo Carvalho de Meloea2ae172007-04-25 17:55:53 -07001546 memcpy(&d->flit[1], &from[1],
1547 skb_transport_offset(skb) - sizeof(*from));
Divy Le Ray4d22de32007-01-18 22:04:14 -05001548
Arnaldo Carvalho de Meloea2ae172007-04-25 17:55:53 -07001549 flits = skb_transport_offset(skb) / 8;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001550 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001551 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001552 skb->tail - skb->transport_header,
Divy Le Ray4d22de32007-01-18 22:04:14 -05001553 adap->pdev);
Divy Le Ray99d7cf32007-02-24 16:44:06 -08001554 if (need_skb_unmap()) {
1555 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1556 skb->destructor = deferred_unmap_destructor;
Divy Le Ray99d7cf32007-02-24 16:44:06 -08001557 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05001558
1559 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1560 gen, from->wr_hi, from->wr_lo);
1561}
1562
1563/**
1564 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1565 * @skb: the packet
1566 *
1567 * Returns the number of Tx descriptors needed for the given offload
1568 * packet. These packets are already fully constructed.
1569 */
1570static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1571{
Divy Le Ray27186dc2007-08-21 20:49:15 -07001572 unsigned int flits, cnt;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001573
Divy Le Ray27186dc2007-08-21 20:49:15 -07001574 if (skb->len <= WR_LEN)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001575 return 1; /* packet fits as immediate data */
1576
Arnaldo Carvalho de Meloea2ae172007-04-25 17:55:53 -07001577 flits = skb_transport_offset(skb) / 8; /* headers */
Divy Le Ray27186dc2007-08-21 20:49:15 -07001578 cnt = skb_shinfo(skb)->nr_frags;
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001579 if (skb->tail != skb->transport_header)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001580 cnt++;
1581 return flits_to_desc(flits + sgl_len(cnt));
1582}
1583
1584/**
1585 * ofld_xmit - send a packet through an offload queue
1586 * @adap: the adapter
1587 * @q: the Tx offload queue
1588 * @skb: the packet
1589 *
1590 * Send an offload packet through an SGE offload queue.
1591 */
1592static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1593 struct sk_buff *skb)
1594{
1595 int ret;
1596 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1597
1598 spin_lock(&q->lock);
Divy Le Ray42c8ea12009-03-12 21:14:04 +00001599again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001600
1601 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1602 if (unlikely(ret)) {
1603 if (ret == 1) {
1604 skb->priority = ndesc; /* save for restart */
1605 spin_unlock(&q->lock);
1606 return NET_XMIT_CN;
1607 }
1608 goto again;
1609 }
1610
1611 gen = q->gen;
1612 q->in_use += ndesc;
1613 pidx = q->pidx;
1614 q->pidx += ndesc;
1615 if (q->pidx >= q->size) {
1616 q->pidx -= q->size;
1617 q->gen ^= 1;
1618 }
1619 spin_unlock(&q->lock);
1620
1621 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1622 check_ring_tx_db(adap, q);
1623 return NET_XMIT_SUCCESS;
1624}
1625
1626/**
1627 * restart_offloadq - restart a suspended offload queue
1628 * @qs: the queue set containing the offload queue
1629 *
1630 * Resumes transmission on a suspended Tx offload queue.
1631 */
1632static void restart_offloadq(unsigned long data)
1633{
1634 struct sk_buff *skb;
1635 struct sge_qset *qs = (struct sge_qset *)data;
1636 struct sge_txq *q = &qs->txq[TXQ_OFLD];
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001637 const struct port_info *pi = netdev_priv(qs->netdev);
1638 struct adapter *adap = pi->adapter;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001639
1640 spin_lock(&q->lock);
Divy Le Ray42c8ea12009-03-12 21:14:04 +00001641again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001642
1643 while ((skb = skb_peek(&q->sendq)) != NULL) {
1644 unsigned int gen, pidx;
1645 unsigned int ndesc = skb->priority;
1646
1647 if (unlikely(q->size - q->in_use < ndesc)) {
1648 set_bit(TXQ_OFLD, &qs->txq_stopped);
1649 smp_mb__after_clear_bit();
1650
1651 if (should_restart_tx(q) &&
1652 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1653 goto again;
1654 q->stops++;
1655 break;
1656 }
1657
1658 gen = q->gen;
1659 q->in_use += ndesc;
1660 pidx = q->pidx;
1661 q->pidx += ndesc;
1662 if (q->pidx >= q->size) {
1663 q->pidx -= q->size;
1664 q->gen ^= 1;
1665 }
1666 __skb_unlink(skb, &q->sendq);
1667 spin_unlock(&q->lock);
1668
1669 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1670 spin_lock(&q->lock);
1671 }
1672 spin_unlock(&q->lock);
1673
1674#if USE_GTS
1675 set_bit(TXQ_RUNNING, &q->flags);
1676 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1677#endif
Divy Le Rayafefce62007-11-16 11:22:21 -08001678 wmb();
Divy Le Ray4d22de32007-01-18 22:04:14 -05001679 t3_write_reg(adap, A_SG_KDOORBELL,
1680 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1681}
1682
1683/**
1684 * queue_set - return the queue set a packet should use
1685 * @skb: the packet
1686 *
1687 * Maps a packet to the SGE queue set it should use. The desired queue
1688 * set is carried in bits 1-3 in the packet's priority.
1689 */
1690static inline int queue_set(const struct sk_buff *skb)
1691{
1692 return skb->priority >> 1;
1693}
1694
1695/**
1696 * is_ctrl_pkt - return whether an offload packet is a control packet
1697 * @skb: the packet
1698 *
1699 * Determines whether an offload packet should use an OFLD or a CTRL
1700 * Tx queue. This is indicated by bit 0 in the packet's priority.
1701 */
1702static inline int is_ctrl_pkt(const struct sk_buff *skb)
1703{
1704 return skb->priority & 1;
1705}
1706
1707/**
1708 * t3_offload_tx - send an offload packet
1709 * @tdev: the offload device to send to
1710 * @skb: the packet
1711 *
1712 * Sends an offload packet. We use the packet priority to select the
1713 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1714 * should be sent as regular or control, bits 1-3 select the queue set.
1715 */
1716int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1717{
1718 struct adapter *adap = tdev2adap(tdev);
1719 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1720
1721 if (unlikely(is_ctrl_pkt(skb)))
1722 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1723
1724 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1725}
1726
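/*
 * Example (sketch only, not part of the driver): how an offload caller
 * would encode the queue selection that t3_offload_tx() above decodes.
 * Bit 0 of skb->priority routes the packet to the CTRL queue, bits 1-3
 * select the queue set.  "qset" and "is_ctrl" are hypothetical values
 * chosen by the caller.
 */
static inline void example_set_ofld_priority(struct sk_buff *skb,
					     unsigned int qset, int is_ctrl)
{
	skb->priority = (qset << 1) | (is_ctrl ? 1 : 0);
}
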
1727/**
1728 * offload_enqueue - add an offload packet to an SGE offload receive queue
1729 * @q: the SGE response queue
1730 * @skb: the packet
1731 *
1732 * Add a new offload packet to an SGE response queue's offload packet
1733 * queue. If the packet is the first on the queue it schedules the RX
1734 * softirq to process the queue.
1735 */
1736static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1737{
David S. Miller147e70e2008-09-22 01:29:52 -07001738 int was_empty = skb_queue_empty(&q->rx_queue);
1739
1740 __skb_queue_tail(&q->rx_queue, skb);
1741
1742 if (was_empty) {
Divy Le Ray4d22de32007-01-18 22:04:14 -05001743 struct sge_qset *qs = rspq_to_qset(q);
1744
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001745 napi_schedule(&qs->napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001746 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05001747}
1748
1749/**
1750 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1751 * @tdev: the offload device that will be receiving the packets
1752 * @q: the SGE response queue that assembled the bundle
1753 * @skbs: the partial bundle
1754 * @n: the number of packets in the bundle
1755 *
1756 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1757 */
1758static inline void deliver_partial_bundle(struct t3cdev *tdev,
1759 struct sge_rspq *q,
1760 struct sk_buff *skbs[], int n)
1761{
1762 if (n) {
1763 q->offload_bundles++;
1764 tdev->recv(tdev, skbs, n);
1765 }
1766}
1767
1768/**
1769 * ofld_poll - NAPI handler for offload packets in interrupt mode
1770 * @dev: the network device doing the polling
1771 * @budget: polling budget
1772 *
1773 * The NAPI handler for offload packets when a response queue is serviced
1774 * by the hard interrupt handler, i.e., when it's operating in non-polling
1775 * mode. Creates small packet batches and sends them through the offload
1776 * receive handler. Batches need to be of modest size as we do prefetches
1777 * on the packets in each.
1778 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001779static int ofld_poll(struct napi_struct *napi, int budget)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001780{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001781 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001782 struct sge_rspq *q = &qs->rspq;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001783 struct adapter *adapter = qs->adap;
1784 int work_done = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001785
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001786 while (work_done < budget) {
David S. Miller147e70e2008-09-22 01:29:52 -07001787 struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
1788 struct sk_buff_head queue;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001789 int ngathered;
1790
1791 spin_lock_irq(&q->lock);
David S. Miller147e70e2008-09-22 01:29:52 -07001792 __skb_queue_head_init(&queue);
1793 skb_queue_splice_init(&q->rx_queue, &queue);
1794 if (skb_queue_empty(&queue)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001795 napi_complete(napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001796 spin_unlock_irq(&q->lock);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001797 return work_done;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001798 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05001799 spin_unlock_irq(&q->lock);
1800
David S. Miller147e70e2008-09-22 01:29:52 -07001801 ngathered = 0;
1802 skb_queue_walk_safe(&queue, skb, tmp) {
1803 if (work_done >= budget)
1804 break;
1805 work_done++;
1806
1807 __skb_unlink(skb, &queue);
1808 prefetch(skb->data);
1809 skbs[ngathered] = skb;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001810 if (++ngathered == RX_BUNDLE_SIZE) {
1811 q->offload_bundles++;
1812 adapter->tdev.recv(&adapter->tdev, skbs,
1813 ngathered);
1814 ngathered = 0;
1815 }
1816 }
David S. Miller147e70e2008-09-22 01:29:52 -07001817 if (!skb_queue_empty(&queue)) {
1818 /* splice remaining packets back onto Rx queue */
Divy Le Ray4d22de32007-01-18 22:04:14 -05001819 spin_lock_irq(&q->lock);
David S. Miller147e70e2008-09-22 01:29:52 -07001820 skb_queue_splice(&queue, &q->rx_queue);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001821 spin_unlock_irq(&q->lock);
1822 }
1823 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1824 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001825
1826 return work_done;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001827}
1828
1829/**
1830 * rx_offload - process a received offload packet
1831 * @tdev: the offload device receiving the packet
1832 * @rq: the response queue that received the packet
1833 * @skb: the packet
1834 * @rx_gather: a gather list of packets if we are building a bundle
1835 * @gather_idx: index of the next available slot in the bundle
1836 *
1837 * Process an ingress offload packet and add it to the offload ingress
1838 * queue. Returns the index of the next available slot in the bundle.
1839 */
1840static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1841 struct sk_buff *skb, struct sk_buff *rx_gather[],
1842 unsigned int gather_idx)
1843{
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001844 skb_reset_mac_header(skb);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001845 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001846 skb_reset_transport_header(skb);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001847
1848 if (rq->polling) {
1849 rx_gather[gather_idx++] = skb;
1850 if (gather_idx == RX_BUNDLE_SIZE) {
1851 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1852 gather_idx = 0;
1853 rq->offload_bundles++;
1854 }
1855 } else
1856 offload_enqueue(rq, skb);
1857
1858 return gather_idx;
1859}
1860
1861/**
Divy Le Ray4d22de32007-01-18 22:04:14 -05001862 * restart_tx - check whether to restart suspended Tx queues
1863 * @qs: the queue set to resume
1864 *
1865 * Restarts suspended Tx queues of an SGE queue set if they have enough
1866 * free resources to resume operation.
1867 */
1868static void restart_tx(struct sge_qset *qs)
1869{
1870 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1871 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1872 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1873 qs->txq[TXQ_ETH].restarts++;
1874 if (netif_running(qs->netdev))
Divy Le Ray82ad3322008-12-16 01:09:39 -08001875 netif_tx_wake_queue(qs->tx_q);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001876 }
1877
1878 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1879 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1880 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1881 qs->txq[TXQ_OFLD].restarts++;
1882 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1883 }
1884 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1885 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1886 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1887 qs->txq[TXQ_CTRL].restarts++;
1888 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1889 }
1890}
1891
1892/**
Karen Xiea109a5b2008-12-18 22:56:20 -08001893 * cxgb3_arp_process - process an ARP request probing a private IP address
1894 * @adapter: the adapter
1895 * @skb: the skbuff containing the ARP request
1896 *
1897 * Check if the ARP request is probing the private IP address
1898 * dedicated to iSCSI and generate an ARP reply if so.
1899 */
1900static void cxgb3_arp_process(struct adapter *adapter, struct sk_buff *skb)
1901{
1902 struct net_device *dev = skb->dev;
1903 struct port_info *pi;
1904 struct arphdr *arp;
1905 unsigned char *arp_ptr;
1906 unsigned char *sha;
1907 __be32 sip, tip;
1908
1909 if (!dev)
1910 return;
1911
1912 skb_reset_network_header(skb);
1913 arp = arp_hdr(skb);
1914
1915 if (arp->ar_op != htons(ARPOP_REQUEST))
1916 return;
1917
1918 arp_ptr = (unsigned char *)(arp + 1);
1919 sha = arp_ptr;
1920 arp_ptr += dev->addr_len;
1921 memcpy(&sip, arp_ptr, sizeof(sip));
1922 arp_ptr += sizeof(sip);
1923 arp_ptr += dev->addr_len;
1924 memcpy(&tip, arp_ptr, sizeof(tip));
1925
1926 pi = netdev_priv(dev);
1927 if (tip != pi->iscsi_ipv4addr)
1928 return;
1929
1930 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
1931 dev->dev_addr, sha);
1932
1933}
1934
1935static inline int is_arp(struct sk_buff *skb)
1936{
1937 return skb->protocol == htons(ETH_P_ARP);
1938}
1939
1940/**
Divy Le Ray4d22de32007-01-18 22:04:14 -05001941 * rx_eth - process an ingress ethernet packet
1942 * @adap: the adapter
1943 * @rq: the response queue that received the packet
1944 * @skb: the packet
1945 * @pad: amount of padding at the start of the buffer
1946 *
1947 * Process an ingress Ethernet packet and deliver it to the stack.
1948 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1949 * if it was immediate data in a response.
1950 */
1951static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
Divy Le Rayb47385b2008-05-21 18:56:26 -07001952 struct sk_buff *skb, int pad, int lro)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001953{
1954 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
Divy Le Rayb47385b2008-05-21 18:56:26 -07001955 struct sge_qset *qs = rspq_to_qset(rq);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001956 struct port_info *pi;
1957
Divy Le Ray4d22de32007-01-18 22:04:14 -05001958 skb_pull(skb, sizeof(*p) + pad);
Arnaldo Carvalho de Melo4c13eb62007-04-25 17:40:23 -07001959 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001960 pi = netdev_priv(skb->dev);
Roland Dreier47fd23f2009-01-11 00:19:36 -08001961 if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid && p->csum == htons(0xffff) &&
Divy Le Ray4d22de32007-01-18 22:04:14 -05001962 !p->fragment) {
Karen Xiea109a5b2008-12-18 22:56:20 -08001963 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001964 skb->ip_summed = CHECKSUM_UNNECESSARY;
1965 } else
1966 skb->ip_summed = CHECKSUM_NONE;
David S. Miller0c8dfc82009-01-27 16:22:32 -08001967 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001968
1969 if (unlikely(p->vlan_valid)) {
1970 struct vlan_group *grp = pi->vlan_grp;
1971
Divy Le Rayb47385b2008-05-21 18:56:26 -07001972 qs->port_stats[SGE_PSTAT_VLANEX]++;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001973 if (likely(grp))
Divy Le Rayb47385b2008-05-21 18:56:26 -07001974 if (lro)
Herbert Xu7be2df42009-01-21 14:39:13 -08001975 vlan_gro_receive(&qs->napi, grp,
1976 ntohs(p->vlan), skb);
Karen Xiea109a5b2008-12-18 22:56:20 -08001977 else {
1978 if (unlikely(pi->iscsi_ipv4addr &&
1979 is_arp(skb))) {
1980 unsigned short vtag = ntohs(p->vlan) &
1981 VLAN_VID_MASK;
1982 skb->dev = vlan_group_get_device(grp,
1983 vtag);
1984 cxgb3_arp_process(adap, skb);
1985 }
Divy Le Rayb47385b2008-05-21 18:56:26 -07001986 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1987 rq->polling);
Karen Xiea109a5b2008-12-18 22:56:20 -08001988 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05001989 else
1990 dev_kfree_skb_any(skb);
Divy Le Rayb47385b2008-05-21 18:56:26 -07001991 } else if (rq->polling) {
1992 if (lro)
Herbert Xu7be2df42009-01-21 14:39:13 -08001993 napi_gro_receive(&qs->napi, skb);
Karen Xiea109a5b2008-12-18 22:56:20 -08001994 else {
1995 if (unlikely(pi->iscsi_ipv4addr && is_arp(skb)))
1996 cxgb3_arp_process(adap, skb);
Divy Le Rayb47385b2008-05-21 18:56:26 -07001997 netif_receive_skb(skb);
Karen Xiea109a5b2008-12-18 22:56:20 -08001998 }
Divy Le Rayb47385b2008-05-21 18:56:26 -07001999 } else
Divy Le Ray4d22de32007-01-18 22:04:14 -05002000 netif_rx(skb);
2001}
2002
Divy Le Rayb47385b2008-05-21 18:56:26 -07002003static inline int is_eth_tcp(u32 rss)
2004{
2005 return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
2006}
2007
2008/**
Divy Le Rayb47385b2008-05-21 18:56:26 -07002009 * lro_add_page - add a page chunk to an LRO session
2010 * @adap: the adapter
2011 * @qs: the associated queue set
2012 * @fl: the free list containing the page chunk to add
2013 * @len: packet length
2014 * @complete: Indicates the last fragment of a frame
2015 *
2016 * Add a received packet contained in a page chunk to an existing LRO
2017 * session.
2018 */
2019static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2020 struct sge_fl *fl, int len, int complete)
2021{
2022 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2023 struct cpl_rx_pkt *cpl;
Herbert Xu7be2df42009-01-21 14:39:13 -08002024 struct skb_frag_struct *rx_frag = qs->lro_frag_tbl.frags;
2025 int nr_frags = qs->lro_frag_tbl.nr_frags;
2026 int frag_len = qs->lro_frag_tbl.len;
Divy Le Rayb47385b2008-05-21 18:56:26 -07002027 int offset = 0;
2028
2029 if (!nr_frags) {
2030 offset = 2 + sizeof(struct cpl_rx_pkt);
2031 qs->lro_va = cpl = sd->pg_chunk.va + 2;
2032 }
2033
2034 fl->credits--;
2035
2036 len -= offset;
2037 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
2038 fl->buf_size, PCI_DMA_FROMDEVICE);
2039
Divy Le Rayb2b964f2009-03-12 21:13:59 +00002040 prefetch(&qs->lro_frag_tbl);
2041
Divy Le Rayb47385b2008-05-21 18:56:26 -07002042 rx_frag += nr_frags;
2043 rx_frag->page = sd->pg_chunk.page;
2044 rx_frag->page_offset = sd->pg_chunk.offset + offset;
2045 rx_frag->size = len;
2046 frag_len += len;
Herbert Xu7be2df42009-01-21 14:39:13 -08002047 qs->lro_frag_tbl.nr_frags++;
2048 qs->lro_frag_tbl.len = frag_len;
Divy Le Rayb47385b2008-05-21 18:56:26 -07002049
2050 if (!complete)
2051 return;
2052
Herbert Xu7be2df42009-01-21 14:39:13 -08002053 qs->lro_frag_tbl.ip_summed = CHECKSUM_UNNECESSARY;
Divy Le Rayb47385b2008-05-21 18:56:26 -07002054 cpl = qs->lro_va;
2055
2056 if (unlikely(cpl->vlan_valid)) {
2057 struct net_device *dev = qs->netdev;
2058 struct port_info *pi = netdev_priv(dev);
2059 struct vlan_group *grp = pi->vlan_grp;
2060
2061 if (likely(grp != NULL)) {
Herbert Xu7be2df42009-01-21 14:39:13 -08002062 vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan),
2063 &qs->lro_frag_tbl);
2064 goto out;
Divy Le Rayb47385b2008-05-21 18:56:26 -07002065 }
2066 }
Herbert Xu7be2df42009-01-21 14:39:13 -08002067 napi_gro_frags(&qs->napi, &qs->lro_frag_tbl);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002068
Herbert Xu7be2df42009-01-21 14:39:13 -08002069out:
2070 qs->lro_frag_tbl.nr_frags = qs->lro_frag_tbl.len = 0;
Divy Le Rayb47385b2008-05-21 18:56:26 -07002071}
2072
Divy Le Ray4d22de32007-01-18 22:04:14 -05002073/**
2074 * handle_rsp_cntrl_info - handles control information in a response
2075 * @qs: the queue set corresponding to the response
2076 * @flags: the response control flags
Divy Le Ray4d22de32007-01-18 22:04:14 -05002077 *
2078 * Handles the control information of an SGE response, such as GTS
2079 * indications and completion credits for the queue set's Tx queues.
Divy Le Ray6195c712007-01-30 19:43:56 -08002080 * HW coalesces credits, so we don't do any extra SW coalescing.
Divy Le Ray4d22de32007-01-18 22:04:14 -05002081 */
Divy Le Ray6195c712007-01-30 19:43:56 -08002082static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002083{
2084 unsigned int credits;
2085
2086#if USE_GTS
2087 if (flags & F_RSPD_TXQ0_GTS)
2088 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2089#endif
2090
Divy Le Ray4d22de32007-01-18 22:04:14 -05002091 credits = G_RSPD_TXQ0_CR(flags);
2092 if (credits)
2093 qs->txq[TXQ_ETH].processed += credits;
2094
Divy Le Ray6195c712007-01-30 19:43:56 -08002095 credits = G_RSPD_TXQ2_CR(flags);
2096 if (credits)
2097 qs->txq[TXQ_CTRL].processed += credits;
2098
Divy Le Ray4d22de32007-01-18 22:04:14 -05002099# if USE_GTS
2100 if (flags & F_RSPD_TXQ1_GTS)
2101 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2102# endif
Divy Le Ray6195c712007-01-30 19:43:56 -08002103 credits = G_RSPD_TXQ1_CR(flags);
2104 if (credits)
2105 qs->txq[TXQ_OFLD].processed += credits;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002106}
2107
2108/**
2109 * check_ring_db - check if we need to ring any doorbells
2110 * @adapter: the adapter
2111 * @qs: the queue set whose Tx queues are to be examined
2112 * @sleeping: indicates which Tx queue sent GTS
2113 *
2114 * Checks if some of a queue set's Tx queues need to ring their doorbells
2115 * to resume transmission after idling while they still have unprocessed
2116 * descriptors.
2117 */
2118static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2119 unsigned int sleeping)
2120{
2121 if (sleeping & F_RSPD_TXQ0_GTS) {
2122 struct sge_txq *txq = &qs->txq[TXQ_ETH];
2123
2124 if (txq->cleaned + txq->in_use != txq->processed &&
2125 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2126 set_bit(TXQ_RUNNING, &txq->flags);
2127 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2128 V_EGRCNTX(txq->cntxt_id));
2129 }
2130 }
2131
2132 if (sleeping & F_RSPD_TXQ1_GTS) {
2133 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2134
2135 if (txq->cleaned + txq->in_use != txq->processed &&
2136 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2137 set_bit(TXQ_RUNNING, &txq->flags);
2138 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2139 V_EGRCNTX(txq->cntxt_id));
2140 }
2141 }
2142}
2143
2144/**
2145 * is_new_response - check if a response is newly written
2146 * @r: the response descriptor
2147 * @q: the response queue
2148 *
2149 * Returns true if a response descriptor contains a yet unprocessed
2150 * response.
2151 */
2152static inline int is_new_response(const struct rsp_desc *r,
2153 const struct sge_rspq *q)
2154{
2155 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
2156}
2157
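/*
 * Sketch (illustrative only, not part of the driver): the ring-walk
 * pattern the response processing loops below are built on.  The SGE
 * stamps each response it writes with the current generation, so a
 * descriptor is considered new only while its generation matches the
 * queue's; flipping q->gen on every wrap of cidx lets stale descriptors
 * from the previous pass be recognized without clearing the ring.
 */
static inline void example_advance_rspq(struct sge_rspq *q,
					struct rsp_desc **r)
{
	(*r)++;
	if (unlikely(++q->cidx == q->size)) {
		q->cidx = 0;
		q->gen ^= 1;		/* generation flips on each wrap */
		*r = q->desc;
	}
}
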
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002158static inline void clear_rspq_bufstate(struct sge_rspq * const q)
2159{
2160 q->pg_skb = NULL;
2161 q->rx_recycle_buf = 0;
2162}
2163
Divy Le Ray4d22de32007-01-18 22:04:14 -05002164#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2165#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2166 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2167 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2168 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2169
2170/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2171#define NOMEM_INTR_DELAY 2500
2172
2173/**
2174 * process_responses - process responses from an SGE response queue
2175 * @adap: the adapter
2176 * @qs: the queue set to which the response queue belongs
2177 * @budget: how many responses can be processed in this round
2178 *
2179 * Process responses from an SGE response queue up to the supplied budget.
2180 * Responses include received packets as well as credits and other events
2181 * for the queues that belong to the response queue's queue set.
2182 * A negative budget is effectively unlimited.
2183 *
2184 * Additionally choose the interrupt holdoff time for the next interrupt
2185 * on this queue. If the system is under memory shortage use a fairly
2186 * long delay to help recovery.
2187 */
2188static int process_responses(struct adapter *adap, struct sge_qset *qs,
2189 int budget)
2190{
2191 struct sge_rspq *q = &qs->rspq;
2192 struct rsp_desc *r = &q->desc[q->cidx];
2193 int budget_left = budget;
Divy Le Ray6195c712007-01-30 19:43:56 -08002194 unsigned int sleeping = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002195 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
2196 int ngathered = 0;
2197
2198 q->next_holdoff = q->holdoff_tmr;
2199
2200 while (likely(budget_left && is_new_response(r, q))) {
Divy Le Rayb47385b2008-05-21 18:56:26 -07002201 int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002202 struct sk_buff *skb = NULL;
2203 u32 len, flags = ntohl(r->flags);
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002204 __be32 rss_hi = *(const __be32 *)r,
2205 rss_lo = r->rss_hdr.rss_hash_val;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002206
2207 eth = r->rss_hdr.opcode == CPL_RX_PKT;
2208
2209 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
2210 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
2211 if (!skb)
2212 goto no_mem;
2213
2214 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
2215 skb->data[0] = CPL_ASYNC_NOTIF;
2216 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
2217 q->async_notif++;
2218 } else if (flags & F_RSPD_IMM_DATA_VALID) {
2219 skb = get_imm_packet(r);
2220 if (unlikely(!skb)) {
Divy Le Raycf992af2007-05-30 21:10:47 -07002221no_mem:
Divy Le Ray4d22de32007-01-18 22:04:14 -05002222 q->next_holdoff = NOMEM_INTR_DELAY;
2223 q->nomem++;
2224 /* consume one credit since we tried */
2225 budget_left--;
2226 break;
2227 }
2228 q->imm_data++;
Divy Le Raye0994eb2007-02-24 16:44:17 -08002229 ethpad = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002230 } else if ((len = ntohl(r->len_cq)) != 0) {
Divy Le Raycf992af2007-05-30 21:10:47 -07002231 struct sge_fl *fl;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002232
Divy Le Ray65ab8382009-02-04 16:31:39 -08002233 lro &= eth && is_eth_tcp(rss_hi);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002234
Divy Le Raycf992af2007-05-30 21:10:47 -07002235 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2236 if (fl->use_pages) {
2237 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
Divy Le Raye0994eb2007-02-24 16:44:17 -08002238
Divy Le Raycf992af2007-05-30 21:10:47 -07002239 prefetch(addr);
2240#if L1_CACHE_BYTES < 128
2241 prefetch(addr + L1_CACHE_BYTES);
2242#endif
Divy Le Raye0994eb2007-02-24 16:44:17 -08002243 __refill_fl(adap, fl);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002244 if (lro > 0) {
2245 lro_add_page(adap, qs, fl,
2246 G_RSPD_LEN(len),
2247 flags & F_RSPD_EOP);
2248 goto next_fl;
2249 }
Divy Le Raye0994eb2007-02-24 16:44:17 -08002250
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002251 skb = get_packet_pg(adap, fl, q,
2252 G_RSPD_LEN(len),
2253 eth ?
2254 SGE_RX_DROP_THRES : 0);
2255 q->pg_skb = skb;
Divy Le Raycf992af2007-05-30 21:10:47 -07002256 } else
Divy Le Raye0994eb2007-02-24 16:44:17 -08002257 skb = get_packet(adap, fl, G_RSPD_LEN(len),
2258 eth ? SGE_RX_DROP_THRES : 0);
Divy Le Raycf992af2007-05-30 21:10:47 -07002259 if (unlikely(!skb)) {
2260 if (!eth)
2261 goto no_mem;
2262 q->rx_drops++;
2263 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2264 __skb_pull(skb, 2);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002265next_fl:
Divy Le Ray4d22de32007-01-18 22:04:14 -05002266 if (++fl->cidx == fl->size)
2267 fl->cidx = 0;
2268 } else
2269 q->pure_rsps++;
2270
2271 if (flags & RSPD_CTRL_MASK) {
2272 sleeping |= flags & RSPD_GTS_MASK;
Divy Le Ray6195c712007-01-30 19:43:56 -08002273 handle_rsp_cntrl_info(qs, flags);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002274 }
2275
2276 r++;
2277 if (unlikely(++q->cidx == q->size)) {
2278 q->cidx = 0;
2279 q->gen ^= 1;
2280 r = q->desc;
2281 }
2282 prefetch(r);
2283
2284 if (++q->credits >= (q->size / 4)) {
2285 refill_rspq(adap, q, q->credits);
2286 q->credits = 0;
2287 }
2288
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002289 packet_complete = flags &
2290 (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
2291 F_RSPD_ASYNC_NOTIF);
2292
2293 if (skb != NULL && packet_complete) {
Divy Le Ray4d22de32007-01-18 22:04:14 -05002294 if (eth)
Divy Le Rayb47385b2008-05-21 18:56:26 -07002295 rx_eth(adap, q, skb, ethpad, lro);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002296 else {
Divy Le Rayafefce62007-11-16 11:22:21 -08002297 q->offload_pkts++;
Divy Le Raycf992af2007-05-30 21:10:47 -07002298 /* Preserve the RSS info in csum & priority */
2299 skb->csum = rss_hi;
2300 skb->priority = rss_lo;
2301 ngathered = rx_offload(&adap->tdev, q, skb,
2302 offload_skbs,
Divy Le Raye0994eb2007-02-24 16:44:17 -08002303 ngathered);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002304 }
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002305
2306 if (flags & F_RSPD_EOP)
Divy Le Rayb47385b2008-05-21 18:56:26 -07002307 clear_rspq_bufstate(q);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002308 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05002309 --budget_left;
2310 }
2311
Divy Le Ray4d22de32007-01-18 22:04:14 -05002312 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002313
Divy Le Ray4d22de32007-01-18 22:04:14 -05002314 if (sleeping)
2315 check_ring_db(adap, qs, sleeping);
2316
2317 smp_mb(); /* commit Tx queue .processed updates */
2318 if (unlikely(qs->txq_stopped != 0))
2319 restart_tx(qs);
2320
2321 budget -= budget_left;
2322 return budget;
2323}
2324
2325static inline int is_pure_response(const struct rsp_desc *r)
2326{
Roland Dreierc5419e62008-11-28 21:55:42 -08002327 __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002328
2329 return (n | r->len_cq) == 0;
2330}
2331
2332/**
2333 * napi_rx_handler - the NAPI handler for Rx processing
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002334 * @napi: the napi instance
Divy Le Ray4d22de32007-01-18 22:04:14 -05002335 * @budget: how many packets we can process in this round
2336 *
2337 * Handler for new data events when using NAPI.
2338 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002339static int napi_rx_handler(struct napi_struct *napi, int budget)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002340{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002341 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2342 struct adapter *adap = qs->adap;
2343 int work_done = process_responses(adap, qs, budget);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002344
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002345 if (likely(work_done < budget)) {
2346 napi_complete(napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002347
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002348 /*
2349 * Because we don't atomically flush the following
2350 * write it is possible that in very rare cases it can
2351 * reach the device in a way that races with a new
2352 * response being written plus an error interrupt
2353 * causing the NAPI interrupt handler below to return
2354 * unhandled status to the OS. Protecting against
2355 * this would require flushing the write and doing
2356 * both the write and the flush with interrupts off.
2357 * Way too expensive and unjustifiable given the
2358 * rarity of the race.
2359 *
2360 * The race cannot happen at all with MSI-X.
2361 */
2362 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2363 V_NEWTIMER(qs->rspq.next_holdoff) |
2364 V_NEWINDEX(qs->rspq.cidx));
2365 }
2366 return work_done;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002367}
2368
2369/*
2370 * Returns true if the device is already scheduled for polling.
2371 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002372static inline int napi_is_scheduled(struct napi_struct *napi)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002373{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002374 return test_bit(NAPI_STATE_SCHED, &napi->state);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002375}
2376
2377/**
2378 * process_pure_responses - process pure responses from a response queue
2379 * @adap: the adapter
2380 * @qs: the queue set owning the response queue
2381 * @r: the first pure response to process
2382 *
2383 * A simpler version of process_responses() that handles only pure (i.e.,
2384 * non data-carrying) responses. Such responses are too lightweight to
2385 * justify calling a softirq under NAPI, so we handle them specially in
2386 * the interrupt handler. The function is called with a pointer to a
2387 * response, which the caller must ensure is a valid pure response.
2388 *
2389 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2390 */
2391static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2392 struct rsp_desc *r)
2393{
2394 struct sge_rspq *q = &qs->rspq;
Divy Le Ray6195c712007-01-30 19:43:56 -08002395 unsigned int sleeping = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002396
2397 do {
2398 u32 flags = ntohl(r->flags);
2399
2400 r++;
2401 if (unlikely(++q->cidx == q->size)) {
2402 q->cidx = 0;
2403 q->gen ^= 1;
2404 r = q->desc;
2405 }
2406 prefetch(r);
2407
2408 if (flags & RSPD_CTRL_MASK) {
2409 sleeping |= flags & RSPD_GTS_MASK;
Divy Le Ray6195c712007-01-30 19:43:56 -08002410 handle_rsp_cntrl_info(qs, flags);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002411 }
2412
2413 q->pure_rsps++;
2414 if (++q->credits >= (q->size / 4)) {
2415 refill_rspq(adap, q, q->credits);
2416 q->credits = 0;
2417 }
2418 } while (is_new_response(r, q) && is_pure_response(r));
2419
Divy Le Ray4d22de32007-01-18 22:04:14 -05002420 if (sleeping)
2421 check_ring_db(adap, qs, sleeping);
2422
2423 smp_mb(); /* commit Tx queue .processed updates */
2424 if (unlikely(qs->txq_stopped != 0))
2425 restart_tx(qs);
2426
2427 return is_new_response(r, q);
2428}
2429
2430/**
2431 * handle_responses - decide what to do with new responses in NAPI mode
2432 * @adap: the adapter
2433 * @q: the response queue
2434 *
2435 * This is used by the NAPI interrupt handlers to decide what to do with
2436 * new SGE responses. If there are no new responses it returns -1. If
2437 * there are new responses and they are pure (i.e., non-data carrying)
2438 * it handles them straight in hard interrupt context as they are very
2439 * cheap and don't deliver any packets. Finally, if there are any data
2440 * signaling responses it schedules the NAPI handler. Returns 1 if it
2441 * schedules NAPI, 0 if all new responses were pure.
2442 *
2443 * The caller must ascertain NAPI is not already running.
2444 */
2445static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2446{
2447 struct sge_qset *qs = rspq_to_qset(q);
2448 struct rsp_desc *r = &q->desc[q->cidx];
2449
2450 if (!is_new_response(r, q))
2451 return -1;
2452 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2453 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2454 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2455 return 0;
2456 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002457 napi_schedule(&qs->napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002458 return 1;
2459}
2460
2461/*
2462 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2463 * (i.e., response queue serviced in hard interrupt).
2464 */
2465irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2466{
2467 struct sge_qset *qs = cookie;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002468 struct adapter *adap = qs->adap;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002469 struct sge_rspq *q = &qs->rspq;
2470
2471 spin_lock(&q->lock);
2472 if (process_responses(adap, qs, -1) == 0)
2473 q->unhandled_irqs++;
2474 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2475 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2476 spin_unlock(&q->lock);
2477 return IRQ_HANDLED;
2478}
2479
2480/*
2481 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2482 * (i.e., response queue serviced by NAPI polling).
2483 */
Stephen Hemminger9265fab2007-10-08 16:22:29 -07002484static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002485{
2486 struct sge_qset *qs = cookie;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002487 struct sge_rspq *q = &qs->rspq;
2488
2489 spin_lock(&q->lock);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002490
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002491 if (handle_responses(qs->adap, q) < 0)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002492 q->unhandled_irqs++;
2493 spin_unlock(&q->lock);
2494 return IRQ_HANDLED;
2495}
2496
2497/*
2498 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2499 * SGE response queues as well as error and other async events as they all use
2500 * the same MSI vector. We use one SGE response queue per port in this mode
2501 * and protect all response queues with queue 0's lock.
2502 */
2503static irqreturn_t t3_intr_msi(int irq, void *cookie)
2504{
2505 int new_packets = 0;
2506 struct adapter *adap = cookie;
2507 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2508
2509 spin_lock(&q->lock);
2510
2511 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2512 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2513 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2514 new_packets = 1;
2515 }
2516
2517 if (adap->params.nports == 2 &&
2518 process_responses(adap, &adap->sge.qs[1], -1)) {
2519 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2520
2521 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2522 V_NEWTIMER(q1->next_holdoff) |
2523 V_NEWINDEX(q1->cidx));
2524 new_packets = 1;
2525 }
2526
2527 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2528 q->unhandled_irqs++;
2529
2530 spin_unlock(&q->lock);
2531 return IRQ_HANDLED;
2532}
2533
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002534static int rspq_check_napi(struct sge_qset *qs)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002535{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002536 struct sge_rspq *q = &qs->rspq;
2537
2538 if (!napi_is_scheduled(&qs->napi) &&
2539 is_new_response(&q->desc[q->cidx], q)) {
2540 napi_schedule(&qs->napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002541 return 1;
2542 }
2543 return 0;
2544}
2545
2546/*
2547 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2548 * by NAPI polling). Handles data events from SGE response queues as well as
2549 * error and other async events as they all use the same MSI vector. We use
2550 * one SGE response queue per port in this mode and protect all response
2551 * queues with queue 0's lock.
2552 */
Stephen Hemminger9265fab2007-10-08 16:22:29 -07002553static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002554{
2555 int new_packets;
2556 struct adapter *adap = cookie;
2557 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2558
2559 spin_lock(&q->lock);
2560
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002561 new_packets = rspq_check_napi(&adap->sge.qs[0]);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002562 if (adap->params.nports == 2)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002563 new_packets += rspq_check_napi(&adap->sge.qs[1]);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002564 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2565 q->unhandled_irqs++;
2566
2567 spin_unlock(&q->lock);
2568 return IRQ_HANDLED;
2569}
2570
2571/*
2572 * A helper function that processes responses and issues GTS.
2573 */
2574static inline int process_responses_gts(struct adapter *adap,
2575 struct sge_rspq *rq)
2576{
2577 int work;
2578
2579 work = process_responses(adap, rspq_to_qset(rq), -1);
2580 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2581 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2582 return work;
2583}
2584
2585/*
2586 * The legacy INTx interrupt handler. This needs to handle data events from
2587 * SGE response queues as well as error and other async events as they all use
2588 * the same interrupt pin. We use one SGE response queue per port in this mode
2589 * and protect all response queues with queue 0's lock.
2590 */
2591static irqreturn_t t3_intr(int irq, void *cookie)
2592{
2593 int work_done, w0, w1;
2594 struct adapter *adap = cookie;
2595 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2596 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2597
2598 spin_lock(&q0->lock);
2599
2600 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2601 w1 = adap->params.nports == 2 &&
2602 is_new_response(&q1->desc[q1->cidx], q1);
2603
2604 if (likely(w0 | w1)) {
2605 t3_write_reg(adap, A_PL_CLI, 0);
2606 t3_read_reg(adap, A_PL_CLI); /* flush */
2607
2608 if (likely(w0))
2609 process_responses_gts(adap, q0);
2610
2611 if (w1)
2612 process_responses_gts(adap, q1);
2613
2614 work_done = w0 | w1;
2615 } else
2616 work_done = t3_slow_intr_handler(adap);
2617
2618 spin_unlock(&q0->lock);
2619 return IRQ_RETVAL(work_done != 0);
2620}
2621
2622/*
2623 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2624 * Handles data events from SGE response queues as well as error and other
2625 * async events as they all use the same interrupt pin. We use one SGE
2626 * response queue per port in this mode and protect all response queues with
2627 * queue 0's lock.
2628 */
2629static irqreturn_t t3b_intr(int irq, void *cookie)
2630{
2631 u32 map;
2632 struct adapter *adap = cookie;
2633 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2634
2635 t3_write_reg(adap, A_PL_CLI, 0);
2636 map = t3_read_reg(adap, A_SG_DATA_INTR);
2637
2638 if (unlikely(!map)) /* shared interrupt, most likely */
2639 return IRQ_NONE;
2640
2641 spin_lock(&q0->lock);
2642
2643 if (unlikely(map & F_ERRINTR))
2644 t3_slow_intr_handler(adap);
2645
2646 if (likely(map & 1))
2647 process_responses_gts(adap, q0);
2648
2649 if (map & 2)
2650 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2651
2652 spin_unlock(&q0->lock);
2653 return IRQ_HANDLED;
2654}
2655
2656/*
2657 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2658 * Handles data events from SGE response queues as well as error and other
2659 * async events as they all use the same interrupt pin. We use one SGE
2660 * response queue per port in this mode and protect all response queues with
2661 * queue 0's lock.
2662 */
2663static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2664{
2665 u32 map;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002666 struct adapter *adap = cookie;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002667 struct sge_qset *qs0 = &adap->sge.qs[0];
2668 struct sge_rspq *q0 = &qs0->rspq;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002669
2670 t3_write_reg(adap, A_PL_CLI, 0);
2671 map = t3_read_reg(adap, A_SG_DATA_INTR);
2672
2673 if (unlikely(!map)) /* shared interrupt, most likely */
2674 return IRQ_NONE;
2675
2676 spin_lock(&q0->lock);
2677
2678 if (unlikely(map & F_ERRINTR))
2679 t3_slow_intr_handler(adap);
2680
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002681 if (likely(map & 1))
2682 napi_schedule(&qs0->napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002683
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002684 if (map & 2)
2685 napi_schedule(&adap->sge.qs[1].napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002686
2687 spin_unlock(&q0->lock);
2688 return IRQ_HANDLED;
2689}
2690
2691/**
2692 * t3_intr_handler - select the top-level interrupt handler
2693 * @adap: the adapter
2694 * @polling: whether using NAPI to service response queues
2695 *
2696 * Selects the top-level interrupt handler based on the type of interrupts
2697 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2698 * response queues.
2699 */
Jeff Garzik7c239972007-10-19 03:12:20 -04002700irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002701{
2702 if (adap->flags & USING_MSIX)
2703 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2704 if (adap->flags & USING_MSI)
2705 return polling ? t3_intr_msi_napi : t3_intr_msi;
2706 if (adap->params.rev > 0)
2707 return polling ? t3b_intr_napi : t3b_intr;
2708 return t3_intr;
2709}
2710
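/*
 * Example (sketch only, not part of the driver): how the handler returned
 * by t3_intr_handler() is typically installed for the MSI/legacy INTx
 * case; MSI-X vectors are instead requested per queue set with the
 * t3_sge_intr_msix handlers.  The real driver does this in cxgb3_main.c;
 * the "cxgb3" name here is illustrative.
 */
static int example_request_intr(struct adapter *adap, int polling)
{
	return request_irq(adap->pdev->irq, t3_intr_handler(adap, polling),
			   (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
			   "cxgb3", adap);
}
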
Divy Le Rayb8819552007-12-17 18:47:31 -08002711#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
2712 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
2713 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
2714 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
2715 F_HIRCQPARITYERROR)
2716#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
2717#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
2718 F_RSPQDISABLED)
2719
Divy Le Ray4d22de32007-01-18 22:04:14 -05002720/**
2721 * t3_sge_err_intr_handler - SGE async event interrupt handler
2722 * @adapter: the adapter
2723 *
2724 * Interrupt handler for SGE asynchronous (non-data) events.
2725 */
2726void t3_sge_err_intr_handler(struct adapter *adapter)
2727{
Divy Le Rayfc882192009-03-12 21:14:09 +00002728 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
2729 ~F_FLEMPTY;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002730
Divy Le Rayb8819552007-12-17 18:47:31 -08002731 if (status & SGE_PARERR)
2732 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
2733 status & SGE_PARERR);
2734 if (status & SGE_FRAMINGERR)
2735 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
2736 status & SGE_FRAMINGERR);
2737
Divy Le Ray4d22de32007-01-18 22:04:14 -05002738 if (status & F_RSPQCREDITOVERFOW)
2739 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2740
2741 if (status & F_RSPQDISABLED) {
2742 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2743
2744 CH_ALERT(adapter,
2745 "packet delivered to disabled response queue "
2746 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2747 }
2748
Divy Le Ray6e3f03b2007-08-21 20:49:10 -07002749 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2750 CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
2751 status & F_HIPIODRBDROPERR ? "high" : "lo");
2752
Divy Le Ray4d22de32007-01-18 22:04:14 -05002753 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
Divy Le Rayb8819552007-12-17 18:47:31 -08002754 if (status & SGE_FATALERR)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002755 t3_fatal_err(adapter);
2756}
2757
2758/**
Divy Le Ray42c8ea12009-03-12 21:14:04 +00002759 * sge_timer_tx - perform periodic maintenance of an SGE qset
Divy Le Ray4d22de32007-01-18 22:04:14 -05002760 * @data: the SGE queue set to maintain
2761 *
2762 * Runs periodically from a timer to perform maintenance of an SGE queue
2763 * set. It performs one task:
2764 *
Divy Le Ray42c8ea12009-03-12 21:14:04 +00002765 * Cleans up any completed Tx descriptors that may still be pending.
Divy Le Ray4d22de32007-01-18 22:04:14 -05002766 * Normal descriptor cleanup happens when new packets are added to a Tx
2767 * queue so this timer is relatively infrequent and does any cleanup only
2768 * if the Tx queue has not seen any new packets in a while. We make a
2769 * best effort attempt to reclaim descriptors, in that we don't wait
2770 * around if we cannot get a queue's lock (which most likely is because
2771 * someone else is queueing new packets and so will also handle the clean
2772 * up). Since control queues use immediate data exclusively we don't
2773 * bother cleaning them up here.
2774 *
Divy Le Ray42c8ea12009-03-12 21:14:04 +00002775 */
2776static void sge_timer_tx(unsigned long data)
2777{
2778 struct sge_qset *qs = (struct sge_qset *)data;
2779 struct port_info *pi = netdev_priv(qs->netdev);
2780 struct adapter *adap = pi->adapter;
2781 unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
2782 unsigned long next_period;
2783
2784 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2785 tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
2786 TX_RECLAIM_TIMER_CHUNK);
2787 spin_unlock(&qs->txq[TXQ_ETH].lock);
2788 }
2789 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2790 tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
2791 TX_RECLAIM_TIMER_CHUNK);
2792 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2793 }
2794
2795 next_period = TX_RECLAIM_PERIOD >>
2796 (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
2797 TX_RECLAIM_TIMER_CHUNK);
2798 mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
2799}
2800
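/*
 * Sketch (illustrative only, not part of the driver): the adaptive
 * back-off computed at the end of sge_timer_tx() above.  Reclaiming less
 * than one TX_RECLAIM_TIMER_CHUNK keeps the full TX_RECLAIM_PERIOD;
 * every additional full chunk reclaimed halves the period so a busy
 * queue is revisited sooner.
 */
static inline unsigned long example_tx_reclaim_period(unsigned int reclaimed)
{
	return TX_RECLAIM_PERIOD >> (reclaimed / TX_RECLAIM_TIMER_CHUNK);
}
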
2801/*
2802 * sge_timer_rx - perform periodic maintenance of an SGE qset
2803 * @data: the SGE queue set to maintain
2804 *
2805 * a) Replenishes Rx queues that have run out due to memory shortage.
Divy Le Ray4d22de32007-01-18 22:04:14 -05002806 * Normally new Rx buffers are added when existing ones are consumed but
2807 * when out of memory a queue can become empty. We try to add only a few
2808 * buffers here, the queue will be replenished fully as these new buffers
2809 * are used up if memory shortage has subsided.
Divy Le Ray42c8ea12009-03-12 21:14:04 +00002810 *
2811 * b) Return coalesced response queue credits in case a response queue is
2812 * starved.
2813 *
Divy Le Ray4d22de32007-01-18 22:04:14 -05002814 */
Divy Le Ray42c8ea12009-03-12 21:14:04 +00002815static void sge_timer_rx(unsigned long data)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002816{
2817 spinlock_t *lock;
2818 struct sge_qset *qs = (struct sge_qset *)data;
Divy Le Ray42c8ea12009-03-12 21:14:04 +00002819 struct port_info *pi = netdev_priv(qs->netdev);
2820 struct adapter *adap = pi->adapter;
2821 u32 status;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002822
Divy Le Ray42c8ea12009-03-12 21:14:04 +00002823 lock = adap->params.rev > 0 ?
2824 &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
Divy Le Raybae73f42007-02-24 16:44:12 -08002825
Divy Le Ray42c8ea12009-03-12 21:14:04 +00002826 if (!spin_trylock_irq(lock))
2827 goto out;
Divy Le Raybae73f42007-02-24 16:44:12 -08002828
Divy Le Ray42c8ea12009-03-12 21:14:04 +00002829 if (napi_is_scheduled(&qs->napi))
2830 goto unlock;
2831
2832 if (adap->params.rev < 4) {
2833 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2834
2835 if (status & (1 << qs->rspq.cntxt_id)) {
2836 qs->rspq.starved++;
2837 if (qs->rspq.credits) {
2838 qs->rspq.credits--;
2839 refill_rspq(adap, &qs->rspq, 1);
2840 qs->rspq.restarted++;
2841 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2842 1 << qs->rspq.cntxt_id);
Divy Le Raybae73f42007-02-24 16:44:12 -08002843 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05002844 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05002845 }
Divy Le Ray42c8ea12009-03-12 21:14:04 +00002846
2847 if (qs->fl[0].credits < qs->fl[0].size)
2848 __refill_fl(adap, &qs->fl[0]);
2849 if (qs->fl[1].credits < qs->fl[1].size)
2850 __refill_fl(adap, &qs->fl[1]);
2851
2852unlock:
2853 spin_unlock_irq(lock);
2854out:
2855 mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002856}
2857
2858/**
2859 * t3_update_qset_coalesce - update coalescing settings for a queue set
2860 * @qs: the SGE queue set
2861 * @p: new queue set parameters
2862 *
2863 * Update the coalescing settings for an SGE queue set. Nothing is done
2864 * if the queue set is not initialized yet.
2865 */
2866void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2867{
Divy Le Ray4d22de32007-01-18 22:04:14 -05002868 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
2869 qs->rspq.polling = p->polling;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002870 qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002871}
2872
2873/**
2874 * t3_sge_alloc_qset - initialize an SGE queue set
2875 * @adapter: the adapter
2876 * @id: the queue set id
2877 * @nports: how many Ethernet ports will be using this queue set
2878 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2879 * @p: configuration parameters for this queue set
2880 * @ntxq: number of Tx queues for the queue set
2881 * @dev: net device associated with this queue set
Divy Le Ray82ad3322008-12-16 01:09:39 -08002882 * @netdevq: net device TX queue associated with this queue set
Divy Le Ray4d22de32007-01-18 22:04:14 -05002883 *
2884 * Allocate resources and initialize an SGE queue set. A queue set
2885 * comprises a response queue, two Rx free-buffer queues, and up to 3
2886 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2887 * queue, offload queue, and control queue.
2888 */
2889int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2890 int irq_vec_idx, const struct qset_params *p,
Divy Le Ray82ad3322008-12-16 01:09:39 -08002891 int ntxq, struct net_device *dev,
2892 struct netdev_queue *netdevq)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002893{
Divy Le Rayb1fb1f22008-05-21 18:56:16 -07002894 int i, avail, ret = -ENOMEM;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002895 struct sge_qset *q = &adapter->sge.qs[id];
2896
2897 init_qset_cntxt(q, id);
Divy Le Ray42c8ea12009-03-12 21:14:04 +00002898 setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q);
2899 setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002900
	q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
				   sizeof(struct rx_desc),
				   sizeof(struct rx_sw_desc),
				   &q->fl[0].phys_addr, &q->fl[0].sdesc);
	if (!q->fl[0].desc)
		goto err;

	q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
				   sizeof(struct rx_desc),
				   sizeof(struct rx_sw_desc),
				   &q->fl[1].phys_addr, &q->fl[1].sdesc);
	if (!q->fl[1].desc)
		goto err;

	q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
				  sizeof(struct rsp_desc), 0,
				  &q->rspq.phys_addr, NULL);
	if (!q->rspq.desc)
		goto err;

	for (i = 0; i < ntxq; ++i) {
		/*
		 * The control queue always uses immediate data so does not
		 * need to keep track of any sk_buffs.
		 */
		size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);

		q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
					    sizeof(struct tx_desc), sz,
					    &q->txq[i].phys_addr,
					    &q->txq[i].sdesc);
		if (!q->txq[i].desc)
			goto err;

		q->txq[i].gen = 1;
		q->txq[i].size = p->txq_size[i];
		spin_lock_init(&q->txq[i].lock);
		skb_queue_head_init(&q->txq[i].sendq);
	}

	tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
		     (unsigned long)q);
	tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
		     (unsigned long)q);

	q->fl[0].gen = q->fl[1].gen = 1;
	q->fl[0].size = p->fl_size;
	q->fl[1].size = p->jumbo_size;

	q->rspq.gen = 1;
	q->rspq.size = p->rspq_size;
	spin_lock_init(&q->rspq.lock);
	skb_queue_head_init(&q->rspq.rx_queue);

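	/*
	 * Stop the Ethernet Tx queue while fewer descriptors remain than a
	 * maximally fragmented skb (MAX_SKB_FRAGS + 1 buffers plus the WR
	 * header flits) could consume, per port sharing this queue set.
	 */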
	q->txq[TXQ_ETH].stop_thres = nports *
	    flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);

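	/*
	 * Pick each free list's buffer size: page chunks when the list is
	 * configured to use them, otherwise sk_buff buffers sized for the
	 * CPL-encapsulated frames they will receive.
	 */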
#if FL0_PG_CHUNK_SIZE > 0
	q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
#else
	q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
#endif
#if FL1_PG_CHUNK_SIZE > 0
	q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
#else
	q->fl[1].buf_size = is_offload(adapter) ?
		(16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
#endif

	q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
	q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
	q->fl[0].order = FL0_PG_ORDER;
	q->fl[1].order = FL1_PG_ORDER;

	spin_lock_irq(&adapter->sge.reg_lock);

	/* FL threshold comparison uses < */
	ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
				   q->rspq.phys_addr, q->rspq.size,
				   q->fl[0].buf_size, 1, 0);
	if (ret)
		goto err_unlock;

	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
		ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
					  q->fl[i].phys_addr, q->fl[i].size,
					  q->fl[i].buf_size, p->cong_thres, 1,
					  0);
		if (ret)
			goto err_unlock;
	}

	ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
				 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
				 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
				 1, 0);
	if (ret)
		goto err_unlock;

	if (ntxq > 1) {
		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
					 USE_GTS, SGE_CNTXT_OFLD, id,
					 q->txq[TXQ_OFLD].phys_addr,
					 q->txq[TXQ_OFLD].size, 0, 1, 0);
		if (ret)
			goto err_unlock;
	}

	if (ntxq > 2) {
		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
					 SGE_CNTXT_CTRL, id,
					 q->txq[TXQ_CTRL].phys_addr,
					 q->txq[TXQ_CTRL].size,
					 q->txq[TXQ_CTRL].token, 1, 0);
		if (ret)
			goto err_unlock;
	}

	spin_unlock_irq(&adapter->sge.reg_lock);

	q->adap = adapter;
	q->netdev = dev;
	q->tx_q = netdevq;
	t3_update_qset_coalesce(q, p);

	avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
			  GFP_KERNEL | __GFP_COMP);
	if (!avail) {
		CH_ALERT(adapter, "free list queue 0 initialization failed\n");
		goto err;
	}
	if (avail < q->fl[0].size)
		CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
			avail);

	avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
			  GFP_KERNEL | __GFP_COMP);
	if (avail < q->fl[1].size)
		CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
			avail);
	refill_rspq(adapter, &q->rspq, q->rspq.size - 1);

	t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
		     V_NEWTIMER(q->rspq.holdoff_tmr));

	return 0;

err_unlock:
	spin_unlock_irq(&adapter->sge.reg_lock);
err:
	t3_free_qset(adapter, q);
	return ret;
}

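/*
 * Illustrative call sketch (the real caller lives elsewhere in the driver;
 * the variable names here are placeholders, not taken from this file):
 *
 *	err = t3_sge_alloc_qset(adap, qset_idx, nports, irq_idx, qp, ntxq,
 *				dev, netdev_get_tx_queue(dev, txq_idx));
 *
 * where qp points at the qset_params chosen for this queue set (for
 * example one of the defaults filled in by t3_sge_prep()).  ntxq selects
 * how many of the Ethernet/offload/control Tx queues are created:
 * 1 creates only the Ethernet queue, 3 creates all of them.
 */
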
/**
 * t3_start_sge_timers - start SGE timer callbacks
 * @adap: the adapter
 *
 * Starts each SGE queue set's timer callbacks.
 */
void t3_start_sge_timers(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; ++i) {
		struct sge_qset *q = &adap->sge.qs[i];

		if (q->tx_reclaim_timer.function)
			mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);

		if (q->rx_reclaim_timer.function)
			mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
	}
}

3077/**
Divy Le Ray0ca41c02008-09-25 14:05:28 +00003078 * t3_stop_sge_timers - stop SGE timer call backs
3079 * @adap: the adapter
3080 *
3081 * Stops each SGE queue set's timer call back
3082 */
3083void t3_stop_sge_timers(struct adapter *adap)
3084{
3085 int i;
3086
3087 for (i = 0; i < SGE_QSETS; ++i) {
3088 struct sge_qset *q = &adap->sge.qs[i];
3089
3090 if (q->tx_reclaim_timer.function)
3091 del_timer_sync(&q->tx_reclaim_timer);
Divy Le Ray42c8ea12009-03-12 21:14:04 +00003092 if (q->rx_reclaim_timer.function)
3093 del_timer_sync(&q->rx_reclaim_timer);
Divy Le Ray0ca41c02008-09-25 14:05:28 +00003094 }
3095}
3096
/**
 * t3_free_sge_resources - free SGE resources
 * @adap: the adapter
 *
 * Frees resources used by the SGE queue sets.
 */
void t3_free_sge_resources(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; ++i)
		t3_free_qset(adap, &adap->sge.qs[i]);
}

/**
 * t3_sge_start - enable SGE
 * @adap: the adapter
 *
 * Enables the SGE for DMAs. This is the last step in starting packet
 * transfers.
 */
void t3_sge_start(struct adapter *adap)
{
	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
}

/**
 * t3_sge_stop - disable SGE operation
 * @adap: the adapter
 *
 * Disables the DMA engine. This can be called in emergencies (e.g.,
 * from error interrupts) or from normal process context. In the latter
 * case it also disables any pending queue restart tasklets. Note that
 * if it is called in interrupt context it cannot disable the restart
 * tasklets as it cannot wait; however, the tasklets will have no effect
 * since the doorbells are disabled and the driver will call this again
 * later from process context, at which time the tasklets will be stopped
 * if they are still running.
 */
void t3_sge_stop(struct adapter *adap)
{
	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
	if (!in_interrupt()) {
		int i;

		for (i = 0; i < SGE_QSETS; ++i) {
			struct sge_qset *qs = &adap->sge.qs[i];

			tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
			tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
		}
	}
}

/**
 * t3_sge_init - initialize SGE
 * @adap: the adapter
 * @p: the SGE parameters
 *
 * Performs SGE initialization needed every time after a chip reset.
 * We do not initialize any of the queue sets here; instead the driver
 * top-level must request those individually. We also do not enable DMA
 * here; that should be done after the queues have been set up.
 */
void t3_sge_init(struct adapter *adap, struct sge_params *p)
{
	unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);

	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
	       F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
	       V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
	       V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
#if SGE_NUM_GENBITS == 1
	ctrl |= F_EGRGENCTRL;
#endif
	if (adap->params.rev > 0) {
		if (!(adap->flags & (USING_MSIX | USING_MSI)))
			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
	}
	t3_write_reg(adap, A_SG_CONTROL, ctrl);
	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
		     V_LORCQDRBTHRSH(512));
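	/*
	 * The SGE timer tick is programmed to core_ticks_per_usec / 10
	 * core clocks, i.e. 100ns; response queue holdoff values are
	 * expressed in these ticks.
	 */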
	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
		     adap->params.rev < T3_REV_C ? 1000 : 500);
	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
}

/**
 * t3_sge_prep - one-time SGE initialization
 * @adap: the associated adapter
 * @p: SGE parameters
 *
 * Performs one-time initialization of SGE SW state. Includes determining
 * defaults for the assorted SGE parameters, which admins can change until
 * they are used to initialize the SGE.
 */
void t3_sge_prep(struct adapter *adap, struct sge_params *p)
{
	int i;

	p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	for (i = 0; i < SGE_QSETS; ++i) {
		struct qset_params *q = p->qset + i;

		q->polling = adap->params.rev > 0;
		q->coalesce_usecs = 5;
		q->rspq_size = 1024;
		q->fl_size = 1024;
		q->jumbo_size = 512;
		q->txq_size[TXQ_ETH] = 1024;
		q->txq_size[TXQ_OFLD] = 1024;
		q->txq_size[TXQ_CTRL] = 256;
		q->cong_thres = 0;
	}

	spin_lock_init(&adap->sge.reg_lock);
}

/**
 * t3_get_desc - dump an SGE descriptor for debugging purposes
 * @qs: the queue set
 * @qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
 * @idx: the descriptor index in the queue
 * @data: where to dump the descriptor contents
 *
 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
 * size of the descriptor.
 */
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
		unsigned char *data)
{
	if (qnum >= 6)
		return -EINVAL;

	if (qnum < 3) {
		if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
			return -EINVAL;
		memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
		return sizeof(struct tx_desc);
	}

	if (qnum == 3) {
		if (!qs->rspq.desc || idx >= qs->rspq.size)
			return -EINVAL;
		memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
		return sizeof(struct rsp_desc);
	}

	qnum -= 4;
	if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
		return -EINVAL;
	memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
	return sizeof(struct rx_desc);
}
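
/*
 * Illustrative use (placeholder names; in practice this is called from the
 * driver's debugging/descriptor-dump paths):
 *
 *	unsigned char buf[sizeof(struct tx_desc)];
 *	int len = t3_get_desc(&adap->sge.qs[0], 0, idx, buf);
 *
 * A positive return value is the number of descriptor bytes copied into
 * buf; -EINVAL means the requested queue or index does not exist.
 */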