/*****************************************************************************
 *                                                                           *
 * File: sge.c                                                               *
 * $Revision: 1.26 $                                                         *
 * $Date: 2005/06/21 18:29:48 $                                              *
 * Description:                                                              *
 *  DMA engine.                                                              *
 *  part of the Chelsio 10Gb Ethernet Driver.                                *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify     *
 * it under the terms of the GNU General Public License, version 2, as      *
 * published by the Free Software Foundation.                                *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along  *
 * with this program; if not, write to the Free Software Foundation, Inc.,  *
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.                  *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED   *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF     *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
 *                                                                           *
 * http://www.chelsio.com                                                    *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
 * All rights reserved.                                                      *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                      *
 *                                                                           *
 * Authors: Dimitrios Michailidis <dm@chelsio.com>                           *
 *          Tina Yang <tainay@chelsio.com>                                   *
 *          Felix Marti <felix@chelsio.com>                                  *
 *          Scott Bardone <sbardone@chelsio.com>                             *
 *          Kurt Ottaway <kottaway@chelsio.com>                              *
 *          Frank DiMambro <frank@chelsio.com>                               *
 *                                                                           *
 * History:                                                                  *
 *                                                                           *
 ****************************************************************************/

#include "common.h"

#include <linux/config.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>

#include "cpl5_cmd.h"
#include "sge.h"
#include "regs.h"
#include "espi.h"

#ifdef NETIF_F_TSO
#include <linux/tcp.h>
#endif
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 65 | |
| 66 | #define SGE_CMDQ_N 2 |
| 67 | #define SGE_FREELQ_N 2 |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 68 | #define SGE_CMDQ0_E_N 1024 |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 69 | #define SGE_CMDQ1_E_N 128 |
| 70 | #define SGE_FREEL_SIZE 4096 |
| 71 | #define SGE_JUMBO_FREEL_SIZE 512 |
| 72 | #define SGE_FREEL_REFILL_THRESH 16 |
| 73 | #define SGE_RESPQ_E_N 1024 |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 74 | #define SGE_INTRTIMER_NRES 1000 |
| 75 | #define SGE_RX_COPY_THRES 256 |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 76 | #define SGE_RX_SM_BUF_SIZE 1536 |
| 77 | |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 78 | # define SGE_RX_DROP_THRES 2 |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 79 | |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 80 | #define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4) |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 81 | |
| 82 | /* |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 83 | * Period of the TX buffer reclaim timer. This timer does not need to run |
| 84 | * frequently as TX buffers are usually reclaimed by new TX packets. |
| 85 | */ |
| 86 | #define TX_RECLAIM_PERIOD (HZ / 4) |
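
/*
 * The reclaim timer itself is armed outside this excerpt; a typical
 * (re)arming call, sketched here for illustration only, would look like:
 *
 *	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
 */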

#ifndef NET_IP_ALIGN
# define NET_IP_ALIGN 2
#endif
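
/*
 * The 2-byte offset above shifts the 14-byte Ethernet header so that the
 * IP header following it starts on a 4-byte boundary.
 */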

#define M_CMD_LEN	0x7fffffff
#define V_CMD_LEN(v)	(v)
#define G_CMD_LEN(v)	((v) & M_CMD_LEN)
#define V_CMD_GEN1(v)	((v) << 31)
#define V_CMD_GEN2(v)	(v)
#define F_CMD_DATAVALID	(1 << 1)
#define F_CMD_SOP	(1 << 2)
#define V_CMD_EOP(v)	((v) << 3)
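
/*
 * Each HW descriptor carries the generation bit twice: GEN1 in the
 * len_gen word and GEN2 in the descriptor's last word.  SW writes
 * len_gen first, issues a write barrier, and only then writes the word
 * holding GEN2, so HW treats a descriptor as valid only once it has been
 * completely written (see refill_free_list() and recycle_fl_buf()).
 */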

/*
 * Command queue, receive buffer list, and response queue descriptors.
 */
#if defined(__BIG_ENDIAN_BITFIELD)
struct cmdQ_e {
	u32 addr_lo;
	u32 len_gen;
	u32 flags;
	u32 addr_hi;
};

struct freelQ_e {
	u32 addr_lo;
	u32 len_gen;
	u32 gen2;
	u32 addr_hi;
};

struct respQ_e {
	u32 Qsleeping		: 4;
	u32 Cmdq1CreditReturn	: 5;
	u32 Cmdq1DmaComplete	: 5;
	u32 Cmdq0CreditReturn	: 5;
	u32 Cmdq0DmaComplete	: 5;
	u32 FreelistQid		: 2;
	u32 CreditValid		: 1;
	u32 DataValid		: 1;
	u32 Offload		: 1;
	u32 Eop			: 1;
	u32 Sop			: 1;
	u32 GenerationBit	: 1;
	u32 BufferLength;
};
#elif defined(__LITTLE_ENDIAN_BITFIELD)
struct cmdQ_e {
	u32 len_gen;
	u32 addr_lo;
	u32 addr_hi;
	u32 flags;
};

struct freelQ_e {
	u32 len_gen;
	u32 addr_lo;
	u32 addr_hi;
	u32 gen2;
};

struct respQ_e {
	u32 BufferLength;
	u32 GenerationBit	: 1;
	u32 Sop			: 1;
	u32 Eop			: 1;
	u32 Offload		: 1;
	u32 DataValid		: 1;
	u32 CreditValid		: 1;
	u32 FreelistQid		: 2;
	u32 Cmdq0DmaComplete	: 5;
	u32 Cmdq0CreditReturn	: 5;
	u32 Cmdq1DmaComplete	: 5;
	u32 Cmdq1CreditReturn	: 5;
	u32 Qsleeping		: 4;
};
#endif
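
/*
 * The two variants above describe the same HW layout: the word and
 * bitfield order is mirrored so that each field lands on the same bits
 * regardless of the host's bitfield endianness.
 */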

/*
 * SW Context Command and Freelist Queue Descriptors
 */
struct cmdQ_ce {
	struct sk_buff *skb;
	DECLARE_PCI_UNMAP_ADDR(dma_addr);
	DECLARE_PCI_UNMAP_LEN(dma_len);
};

struct freelQ_ce {
	struct sk_buff *skb;
	DECLARE_PCI_UNMAP_ADDR(dma_addr);
	DECLARE_PCI_UNMAP_LEN(dma_len);
};

/*
 * SW command, freelist and response rings
 */
struct cmdQ {
	unsigned long	status;		/* HW DMA fetch status */
	unsigned int	in_use;		/* # of in-use command descriptors */
	unsigned int	size;		/* # of descriptors */
	unsigned int	processed;	/* total # of descs HW has processed */
	unsigned int	cleaned;	/* total # of descs SW has reclaimed */
	unsigned int	stop_thres;	/* SW TX queue suspend threshold */
	u16		pidx;		/* producer index (SW) */
	u16		cidx;		/* consumer index (HW) */
	u8		genbit;		/* current generation (=valid) bit */
	u8		sop;		/* is next entry start of packet? */
	struct cmdQ_e  *entries;	/* HW command descriptor Q */
	struct cmdQ_ce *centries;	/* SW command context descriptor Q */
	spinlock_t	lock;		/* Lock to protect cmdQ enqueuing */
	dma_addr_t	dma_addr;	/* DMA addr HW command descriptor Q */
};

struct freelQ {
	unsigned int	credits;	/* # of available RX buffers */
	unsigned int	size;		/* free list capacity */
	u16		pidx;		/* producer index (SW) */
	u16		cidx;		/* consumer index (HW) */
	u16		rx_buffer_size;	/* Buffer size on this free list */
	u16		dma_offset;	/* DMA offset to align IP headers */
	u16		recycleq_idx;	/* skb recycle q to use */
	u8		genbit;		/* current generation (=valid) bit */
	struct freelQ_e	 *entries;	/* HW freelist descriptor Q */
	struct freelQ_ce *centries;	/* SW freelist context descriptor Q */
	dma_addr_t	dma_addr;	/* DMA addr HW freelist descriptor Q */
};

struct respQ {
	unsigned int	credits;	/* credits to be returned to SGE */
	unsigned int	size;		/* # of response Q descriptors */
	u16		cidx;		/* consumer index (SW) */
	u8		genbit;		/* current generation(=valid) bit */
	struct respQ_e *entries;	/* HW response descriptor Q */
	dma_addr_t	dma_addr;	/* DMA addr HW response descriptor Q */
};

/* Bit flags for cmdQ.status */
enum {
	CMDQ_STAT_RUNNING = 1,		/* fetch engine is running */
	CMDQ_STAT_LAST_PKT_DB = 2	/* last packet rung the doorbell */
};

/*
 * Main SGE data structure
 *
 * Interrupts are handled by a single CPU and it is likely that on a MP system
 * the application is migrated to another CPU. In that scenario, we try to
 * separate the RX (in irq context) and TX state in order to decrease memory
 * contention.
 */
struct sge {
	struct adapter *adapter;	/* adapter backpointer */
	struct net_device *netdev;	/* netdevice backpointer */
	struct freelQ	freelQ[SGE_FREELQ_N]; /* buffer free lists */
	struct respQ	respQ;		/* response Q */
	unsigned long	stopped_tx_queues; /* bitmap of suspended Tx queues */
	unsigned int	rx_pkt_pad;	/* RX padding for L2 packets */
	unsigned int	jumbo_fl;	/* jumbo freelist Q index */
	unsigned int	intrtimer_nres;	/* no-resource interrupt timer */
	unsigned int	fixed_intrtimer;/* non-adaptive interrupt timer */
	struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
	struct timer_list espibug_timer;
	unsigned int	espibug_timeout;
	struct sk_buff	*espibug_skb;
	u32		sge_control;	/* shadow value of sge control reg */
	struct sge_intr_counts stats;
	struct sge_port_stats port_stats[MAX_NPORTS];
	struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
};

/*
 * PIO to indicate that memory mapped Q contains valid descriptor(s).
 */
static inline void doorbell_pio(struct adapter *adapter, u32 val)
{
	wmb();
	writel(val, adapter->regs + A_SG_DOORBELL);
}

/*
 * Frees all RX buffers on the freelist Q. The caller must make sure that
 * the SGE is turned off before calling this function.
 */
static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits--) {
		struct freelQ_ce *ce = &q->centries[cidx];

		pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
				 pci_unmap_len(ce, dma_len),
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb(ce->skb);
		ce->skb = NULL;
		if (++cidx == q->size)
			cidx = 0;
	}
}

/*
 * Free RX free list and response queue resources.
 */
static void free_rx_resources(struct sge *sge)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	if (sge->respQ.entries) {
		size = sizeof(struct respQ_e) * sge->respQ.size;
		pci_free_consistent(pdev, size, sge->respQ.entries,
				    sge->respQ.dma_addr);
	}

	for (i = 0; i < SGE_FREELQ_N; i++) {
		struct freelQ *q = &sge->freelQ[i];

		if (q->centries) {
			free_freelQ_buffers(pdev, q);
			kfree(q->centries);
		}
		if (q->entries) {
			size = sizeof(struct freelQ_e) * q->size;
			pci_free_consistent(pdev, size, q->entries,
					    q->dma_addr);
		}
	}
}

/*
 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
 * response queue.
 */
static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_FREELQ_N; i++) {
		struct freelQ *q = &sge->freelQ[i];

		q->genbit = 1;
		q->size = p->freelQ_size[i];
		q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
		size = sizeof(struct freelQ_e) * q->size;
		q->entries = (struct freelQ_e *)
			pci_alloc_consistent(pdev, size, &q->dma_addr);
		if (!q->entries)
			goto err_no_mem;
		memset(q->entries, 0, size);
		size = sizeof(struct freelQ_ce) * q->size;
		q->centries = kmalloc(size, GFP_KERNEL);
		if (!q->centries)
			goto err_no_mem;
		memset(q->centries, 0, size);
	}

	/*
	 * Calculate the buffer sizes for the two free lists.  FL0 accommodates
	 * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
	 * including all the sk_buff overhead.
	 *
	 * Note: For T2 FL0 and FL1 are reversed.
	 */
	sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
		sizeof(struct cpl_rx_data) +
		sge->freelQ[!sge->jumbo_fl].dma_offset;
	sge->freelQ[sge->jumbo_fl].rx_buffer_size = (16 * 1024) -
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/*
	 * Setup which skb recycle Q should be used when recycling buffers from
	 * each free list.
	 */
	sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
	sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;

	sge->respQ.genbit = 1;
	sge->respQ.size = SGE_RESPQ_E_N;
	sge->respQ.credits = 0;
	size = sizeof(struct respQ_e) * sge->respQ.size;
	sge->respQ.entries = (struct respQ_e *)
		pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
	if (!sge->respQ.entries)
		goto err_no_mem;
	memset(sge->respQ.entries, 0, size);
	return 0;

err_no_mem:
	free_rx_resources(sge);
	return -ENOMEM;
}

/*
 * Reclaims n TX descriptors and frees the buffers associated with them.
 */
static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
{
	struct cmdQ_ce *ce;
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int cidx = q->cidx;

	q->in_use -= n;
	ce = &q->centries[cidx];
	while (n--) {
		if (q->sop)
			pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
					 pci_unmap_len(ce, dma_len),
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
				       pci_unmap_len(ce, dma_len),
				       PCI_DMA_TODEVICE);
		q->sop = 0;
		if (ce->skb) {
			dev_kfree_skb(ce->skb);
			q->sop = 1;
		}
		ce++;
		if (++cidx == q->size) {
			cidx = 0;
			ce = q->centries;
		}
	}
	q->cidx = cidx;
}
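
/*
 * Note on the sop bookkeeping above: a packet's first descriptor was
 * mapped with pci_map_single() and its fragments with pci_map_page(), so
 * each must be unmapped the same way.  The skb pointer is stored on the
 * packet's last descriptor, which is also where sop is raised again to
 * mark the next entry as the start of a new packet.
 */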

/*
 * Free TX resources.
 *
 * Assumes that SGE is stopped and all interrupts are disabled.
 */
static void free_tx_resources(struct sge *sge)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_CMDQ_N; i++) {
		struct cmdQ *q = &sge->cmdQ[i];

		if (q->centries) {
			if (q->in_use)
				free_cmdQ_buffers(sge, q, q->in_use);
			kfree(q->centries);
		}
		if (q->entries) {
			size = sizeof(struct cmdQ_e) * q->size;
			pci_free_consistent(pdev, size, q->entries,
					    q->dma_addr);
		}
	}
}

/*
 * Allocates basic TX resources, consisting of memory mapped command Qs.
 */
static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_CMDQ_N; i++) {
		struct cmdQ *q = &sge->cmdQ[i];

		q->genbit = 1;
		q->sop = 1;
		q->size = p->cmdQ_size[i];
		q->in_use = 0;
		q->status = 0;
		q->processed = q->cleaned = 0;
		q->stop_thres = 0;
		spin_lock_init(&q->lock);
		size = sizeof(struct cmdQ_e) * q->size;
		q->entries = (struct cmdQ_e *)
			pci_alloc_consistent(pdev, size, &q->dma_addr);
		if (!q->entries)
			goto err_no_mem;
		memset(q->entries, 0, size);
		size = sizeof(struct cmdQ_ce) * q->size;
		q->centries = kmalloc(size, GFP_KERNEL);
		if (!q->centries)
			goto err_no_mem;
		memset(q->centries, 0, size);
	}

	/*
	 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
	 * only. For queue 0 set the stop threshold so we can handle one more
	 * packet from each port, plus reserve an additional 24 entries for
	 * Ethernet packets only. Queue 1 never suspends nor do we reserve
	 * space for Ethernet packets.
	 */
	sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
		(MAX_SKB_FRAGS + 1);
	return 0;

err_no_mem:
	free_tx_resources(sge);
	return -ENOMEM;
}

static inline void setup_ring_params(struct adapter *adapter, u64 addr,
				     u32 size, int base_reg_lo,
				     int base_reg_hi, int size_reg)
{
	writel((u32)addr, adapter->regs + base_reg_lo);
	writel(addr >> 32, adapter->regs + base_reg_hi);
	writel(size, adapter->regs + size_reg);
}

/*
 * Enable/disable VLAN acceleration.
 */
void t1_set_vlan_accel(struct adapter *adapter, int on_off)
{
	struct sge *sge = adapter->sge;

	sge->sge_control &= ~F_VLAN_XTRACT;
	if (on_off)
		sge->sge_control |= F_VLAN_XTRACT;
	if (adapter->open_device_map) {
		writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
		readl(adapter->regs + A_SG_CONTROL); /* flush */
	}
}

/*
 * Programs the various SGE registers.  The engine is not yet enabled at
 * this point, but sge->sge_control is set up and ready to go.
 */
static void configure_sge(struct sge *sge, struct sge_params *p)
{
	struct adapter *ap = sge->adapter;

	writel(0, ap->regs + A_SG_CONTROL);
	setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
			  A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
	setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
			  A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
	setup_ring_params(ap, sge->freelQ[0].dma_addr,
			  sge->freelQ[0].size, A_SG_FL0BASELWR,
			  A_SG_FL0BASEUPR, A_SG_FL0SIZE);
	setup_ring_params(ap, sge->freelQ[1].dma_addr,
			  sge->freelQ[1].size, A_SG_FL1BASELWR,
			  A_SG_FL1BASEUPR, A_SG_FL1SIZE);

	/* The threshold comparison uses <. */
	writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);

	setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
			  A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
	writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);

	sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
		F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
		V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
		F_DISABLE_FL0_GTS | F_DISABLE_FL1_GTS |
		V_RX_PKT_OFFSET(sge->rx_pkt_pad);

#if defined(__BIG_ENDIAN_BITFIELD)
	sge->sge_control |= F_ENABLE_BIG_ENDIAN;
#endif

	/* Initialize no-resource timer */
	sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);

	t1_sge_set_coalesce_params(sge, p);
}

/*
 * Return the payload capacity of the jumbo free-list buffers.
 */
static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
{
	return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
		sge->freelQ[sge->jumbo_fl].dma_offset -
		sizeof(struct cpl_rx_data);
}

/*
 * Frees all SGE related resources and the sge structure itself
 */
void t1_sge_destroy(struct sge *sge)
{
	if (sge->espibug_skb)
		kfree_skb(sge->espibug_skb);

	free_tx_resources(sge);
	free_rx_resources(sge);
	kfree(sge);
}

/*
 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
 * context Q) until the Q is full or alloc_skb fails.
 *
 * It is possible that the generation bits already match, indicating that the
 * buffer is already valid and nothing needs to be done. This happens when we
 * copied a received buffer into a new sk_buff during the interrupt processing.
 *
 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
 * we specify a RX_OFFSET in order to make sure that the IP header is 4B
 * aligned.
 */
static void refill_free_list(struct sge *sge, struct freelQ *q)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	struct freelQ_ce *ce = &q->centries[q->pidx];
	struct freelQ_e *e = &q->entries[q->pidx];
	unsigned int dma_len = q->rx_buffer_size - q->dma_offset;

	while (q->credits < q->size) {
		struct sk_buff *skb;
		dma_addr_t mapping;

		skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
		if (!skb)
			break;

		skb_reserve(skb, q->dma_offset);
		mapping = pci_map_single(pdev, skb->data, dma_len,
					 PCI_DMA_FROMDEVICE);
		ce->skb = skb;
		pci_unmap_addr_set(ce, dma_addr, mapping);
		pci_unmap_len_set(ce, dma_len, dma_len);
		e->addr_lo = (u32)mapping;
		e->addr_hi = (u64)mapping >> 32;
		e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
		wmb();
		e->gen2 = V_CMD_GEN2(q->genbit);

		e++;
		ce++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->genbit ^= 1;
			ce = q->centries;
			e = q->entries;
		}
		q->credits++;
	}
}

/*
 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
 * of both rings, we go into 'few interrupt mode' in order to give the system
 * time to free up resources.
 */
static void freelQs_empty(struct sge *sge)
{
	struct adapter *adapter = sge->adapter;
	u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
	u32 irqholdoff_reg;

	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);

	if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
	    sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
		irq_reg |= F_FL_EXHAUSTED;
		irqholdoff_reg = sge->fixed_intrtimer;
	} else {
		/* Clear the F_FL_EXHAUSTED interrupts for now */
		irq_reg &= ~F_FL_EXHAUSTED;
		irqholdoff_reg = sge->intrtimer_nres;
	}
	writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
	writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);

	/* We reenable the Qs to force a freelist GTS interrupt later */
	doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
}

#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
			F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)

/*
 * Disable SGE Interrupts
 */
void t1_sge_intr_disable(struct sge *sge)
{
	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

	writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
	writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
}

/*
 * Enable SGE interrupts.
 */
void t1_sge_intr_enable(struct sge *sge)
{
	u32 en = SGE_INT_ENABLE;
	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

	if (sge->adapter->flags & TSO_CAPABLE)
		en &= ~F_PACKET_TOO_BIG;
	writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
	writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
}

/*
 * Clear SGE interrupts.
 */
void t1_sge_intr_clear(struct sge *sge)
{
	writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
	writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
}

/*
 * SGE 'Error' interrupt handler
 */
int t1_sge_intr_error_handler(struct sge *sge)
{
	struct adapter *adapter = sge->adapter;
	u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);

	if (adapter->flags & TSO_CAPABLE)
		cause &= ~F_PACKET_TOO_BIG;
	if (cause & F_RESPQ_EXHAUSTED)
		sge->stats.respQ_empty++;
	if (cause & F_RESPQ_OVERFLOW) {
		sge->stats.respQ_overflow++;
		CH_ALERT("%s: SGE response queue overflow\n",
			 adapter->name);
	}
	if (cause & F_FL_EXHAUSTED) {
		sge->stats.freelistQ_empty++;
		freelQs_empty(sge);
	}
	if (cause & F_PACKET_TOO_BIG) {
		sge->stats.pkt_too_big++;
		CH_ALERT("%s: SGE max packet size exceeded\n",
			 adapter->name);
	}
	if (cause & F_PACKET_MISMATCH) {
		sge->stats.pkt_mismatch++;
		CH_ALERT("%s: SGE packet mismatch\n", adapter->name);
	}
	if (cause & SGE_INT_FATAL)
		t1_fatal_err(adapter);

	writel(cause, adapter->regs + A_SG_INT_CAUSE);
	return 0;
}

const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge)
{
	return &sge->stats;
}

const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port)
{
	return &sge->port_stats[port];
}

/**
 *	recycle_fl_buf - recycle a free list buffer
 *	@fl: the free list
 *	@idx: index of buffer to recycle
 *
 *	Recycles the specified buffer on the given free list by adding it at
 *	the next available slot on the list.
 */
static void recycle_fl_buf(struct freelQ *fl, int idx)
{
	struct freelQ_e *from = &fl->entries[idx];
	struct freelQ_e *to = &fl->entries[fl->pidx];

	fl->centries[fl->pidx] = fl->centries[idx];
	to->addr_lo = from->addr_lo;
	to->addr_hi = from->addr_hi;
	to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
	wmb();
	to->gen2 = V_CMD_GEN2(fl->genbit);
	fl->credits++;

	if (++fl->pidx == fl->size) {
		fl->pidx = 0;
		fl->genbit ^= 1;
	}
}

/**
 *	get_packet - return the next ingress packet buffer
 *	@pdev: the PCI device that received the packet
 *	@fl: the SGE free list holding the packet
 *	@len: the actual packet length, excluding any SGE padding
 *	@dma_pad: padding at beginning of buffer left by SGE DMA
 *	@skb_pad: padding to be used if the packet is copied
 *	@copy_thres: length threshold under which a packet should be copied
 *	@drop_thres: # of remaining buffers before we start dropping packets
 *
 *	Get the next packet from a free list and complete setup of the
 *	sk_buff. If the packet is small we make a copy and recycle the
 *	original buffer, otherwise we use the original buffer itself. If a
 *	positive drop threshold is supplied packets are dropped and their
 *	buffers recycled if (a) the number of remaining buffers is under the
 *	threshold and the packet is too big to copy, or (b) the packet should
 *	be copied but there is no memory for the copy.
 */
static inline struct sk_buff *get_packet(struct pci_dev *pdev,
					 struct freelQ *fl, unsigned int len,
					 int dma_pad, int skb_pad,
					 unsigned int copy_thres,
					 unsigned int drop_thres)
{
	struct sk_buff *skb;
	struct freelQ_ce *ce = &fl->centries[fl->cidx];

	if (len < copy_thres) {
		skb = alloc_skb(len + skb_pad, GFP_ATOMIC);
		if (likely(skb != NULL)) {
			skb_reserve(skb, skb_pad);
			skb_put(skb, len);
			pci_dma_sync_single_for_cpu(pdev,
					    pci_unmap_addr(ce, dma_addr),
					    pci_unmap_len(ce, dma_len),
					    PCI_DMA_FROMDEVICE);
			memcpy(skb->data, ce->skb->data + dma_pad, len);
			pci_dma_sync_single_for_device(pdev,
					    pci_unmap_addr(ce, dma_addr),
					    pci_unmap_len(ce, dma_len),
					    PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			goto use_orig_buf;

		recycle_fl_buf(fl, fl->cidx);
		return skb;
	}

	if (fl->credits < drop_thres) {
		recycle_fl_buf(fl, fl->cidx);
		return NULL;
	}

use_orig_buf:
	pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
			 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
	skb = ce->skb;
	skb_reserve(skb, dma_pad);
	skb_put(skb, len);
	return skb;
}

/**
 *	unexpected_offload - handle an unexpected offload packet
 *	@adapter: the adapter
 *	@fl: the free list that received the packet
 *
 *	Called when we receive an unexpected offload packet (e.g., the TOE
 *	function is disabled or the card is a NIC). Prints a message and
 *	recycles the buffer.
 */
static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
{
	struct freelQ_ce *ce = &fl->centries[fl->cidx];
	struct sk_buff *skb = ce->skb;

	pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
				    pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
	CH_ERR("%s: unexpected offload packet, cmd %u\n",
	       adapter->name, *skb->data);
	recycle_fl_buf(fl, fl->cidx);
}

/*
 * Write the command descriptors to transmit the given skb starting at
 * descriptor pidx with the given generation.
 */
static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
				  unsigned int pidx, unsigned int gen,
				  struct cmdQ *q)
{
	dma_addr_t mapping;
	struct cmdQ_e *e, *e1;
	struct cmdQ_ce *ce;
	unsigned int i, flags, nfrags = skb_shinfo(skb)->nr_frags;

	mapping = pci_map_single(adapter->pdev, skb->data,
				 skb->len - skb->data_len, PCI_DMA_TODEVICE);
	ce = &q->centries[pidx];
	ce->skb = NULL;
	pci_unmap_addr_set(ce, dma_addr, mapping);
	pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);

	flags = F_CMD_DATAVALID | F_CMD_SOP | V_CMD_EOP(nfrags == 0) |
		V_CMD_GEN2(gen);
	e = &q->entries[pidx];
	e->addr_lo = (u32)mapping;
	e->addr_hi = (u64)mapping >> 32;
	e->len_gen = V_CMD_LEN(skb->len - skb->data_len) | V_CMD_GEN1(gen);
	for (e1 = e, i = 0; nfrags--; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		ce++;
		e1++;
		if (++pidx == q->size) {
			pidx = 0;
			gen ^= 1;
			ce = q->centries;
			e1 = q->entries;
		}

		mapping = pci_map_page(adapter->pdev, frag->page,
				       frag->page_offset, frag->size,
				       PCI_DMA_TODEVICE);
		ce->skb = NULL;
		pci_unmap_addr_set(ce, dma_addr, mapping);
		pci_unmap_len_set(ce, dma_len, frag->size);

		e1->addr_lo = (u32)mapping;
		e1->addr_hi = (u64)mapping >> 32;
		e1->len_gen = V_CMD_LEN(frag->size) | V_CMD_GEN1(gen);
		e1->flags = F_CMD_DATAVALID | V_CMD_EOP(nfrags == 0) |
			    V_CMD_GEN2(gen);
	}

	ce->skb = skb;
	wmb();
	e->flags = flags;
}
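
/*
 * Ordering matters above: the flags word of the first descriptor, which
 * carries SOP and the GEN2 generation bit, is written only after the
 * write barrier, so HW cannot fetch a partially written packet.
 */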

/*
 * Clean up completed Tx buffers.
 */
static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	if (reclaim) {
		free_cmdQ_buffers(sge, q, reclaim);
		q->cleaned += reclaim;
	}
}

#ifndef SET_ETHTOOL_OPS
# define __netif_rx_complete(dev) netif_rx_complete(dev)
#endif

/*
 * We cannot use the standard netif_rx_schedule_prep() because we have multiple
 * ports plus the TOE all multiplexing onto a single response queue, therefore
 * accepting new responses cannot depend on the state of any particular port.
 * So define our own equivalent that omits the netif_running() test.
 */
static inline int napi_schedule_prep(struct net_device *dev)
{
	return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state);
}

/**
 *	sge_rx - process an ingress ethernet packet
 *	@sge: the sge structure
 *	@fl: the free list that contains the packet buffer
 *	@len: the packet length
 *
 *	Process an ingress ethernet packet and deliver it to the stack.
 */
static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
{
	struct sk_buff *skb;
	struct cpl_rx_pkt *p;
	struct adapter *adapter = sge->adapter;

	sge->stats.ethernet_pkts++;
	skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad,
			 sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES,
			 SGE_RX_DROP_THRES);
	if (!skb) {
		sge->port_stats[0].rx_drops++; /* charge only port 0 for now */
		return 0;
	}

	p = (struct cpl_rx_pkt *)skb->data;
	skb_pull(skb, sizeof(*p));
	skb->dev = adapter->port[p->iff].dev;
	skb->dev->last_rx = jiffies;
	skb->protocol = eth_type_trans(skb, skb->dev);
	if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
	    skb->protocol == htons(ETH_P_IP) &&
	    (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
		sge->port_stats[p->iff].rx_cso_good++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
		skb->ip_summed = CHECKSUM_NONE;

	if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
		sge->port_stats[p->iff].vlan_xtract++;
		if (adapter->params.sge.polling)
			vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
						 ntohs(p->vlan));
		else
			vlan_hwaccel_rx(skb, adapter->vlan_grp,
					ntohs(p->vlan));
	} else if (adapter->params.sge.polling)
		netif_receive_skb(skb);
	else
		netif_rx(skb);
	return 0;
}

/*
 * Returns true if a command queue has enough available descriptors that
 * we can resume Tx operation after temporarily disabling its packet queue.
 */
static inline int enough_free_Tx_descs(const struct cmdQ *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}
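
/*
 * Requiring half the ring to be free before restarting a stopped queue
 * provides hysteresis, so a queue does not bounce between the stopped
 * and running states on every reclaimed descriptor.
 */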

/*
 * Called when sufficient space has become available in the SGE command queues
 * after the Tx packet schedulers have been suspended to restart the Tx path.
 */
static void restart_tx_queues(struct sge *sge)
{
	struct adapter *adap = sge->adapter;

	if (enough_free_Tx_descs(&sge->cmdQ[0])) {
		int i;

		for_each_port(adap, i) {
			struct net_device *nd = adap->port[i].dev;

			if (test_and_clear_bit(nd->if_port,
					       &sge->stopped_tx_queues) &&
			    netif_running(nd)) {
				sge->stats.cmdQ_restarted[3]++;
				netif_wake_queue(nd);
			}
		}
	}
}

/*
 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
 * information.
 */
static unsigned int update_tx_info(struct adapter *adapter,
				   unsigned int flags,
				   unsigned int pr0)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *cmdq = &sge->cmdQ[0];

	cmdq->processed += pr0;

	if (flags & F_CMDQ0_ENABLE) {
		clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);

		if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
		    !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
			set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
		flags &= ~F_CMDQ0_ENABLE;
	}

	if (unlikely(sge->stopped_tx_queues != 0))
		restart_tx_queues(sge);

	return flags;
}

/*
 * Process SGE responses, up to the supplied budget.  Returns the number of
 * responses processed.  A negative budget is effectively unlimited.
 */
static int process_responses(struct adapter *adapter, int budget)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	struct respQ_e *e = &q->entries[q->cidx];
	int budget_left = budget;
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	while (likely(budget_left && e->GenerationBit == q->genbit)) {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		/* We batch updates to the TX side to avoid cacheline
		 * ping-pong of TX state information on MP where the sender
		 * might run on a different CPU than this function...
		 */
		if (unlikely(flags & F_CMDQ0_ENABLE || cmdq_processed[0] > 64)) {
			flags = update_tx_info(adapter, flags, cmdq_processed[0]);
			cmdq_processed[0] = 0;
		}
		if (unlikely(cmdq_processed[1] > 16)) {
			sge->cmdQ[1].processed += cmdq_processed[1];
			cmdq_processed[1] = 0;
		}
		if (likely(e->DataValid)) {
			struct freelQ *fl = &sge->freelQ[e->FreelistQid];

			if (unlikely(!e->Sop || !e->Eop))
				BUG();
			if (unlikely(e->Offload))
				unexpected_offload(adapter, fl);
			else
				sge_rx(sge, fl, e->BufferLength);

			/*
			 * Note: this depends on each packet consuming a
			 * single free-list buffer; cf. the BUG above.
			 */
			if (++fl->cidx == fl->size)
				fl->cidx = 0;
			if (unlikely(--fl->credits <
				     fl->size - SGE_FREEL_REFILL_THRESH))
				refill_free_list(sge, fl);
		} else
			sge->stats.pure_rsps++;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
		--budget_left;
	}

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	budget -= budget_left;
	return budget;
}

/*
 * A simpler version of process_responses() that handles only pure (i.e.,
 * non data-carrying) responses.  Such responses are too light-weight to
 * justify calling a softirq when using NAPI, so we handle them specially in
 * hard interrupt context.  The function is called with a pointer to a
 * response, which the caller must ensure is a valid pure response.  Returns
 * 1 if it encounters a valid data-carrying response, 0 otherwise.
 */
static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	do {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
		sge->stats.pure_rsps++;
	} while (e->GenerationBit == q->genbit && !e->DataValid);

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	return e->GenerationBit == q->genbit;
}

/*
 * Handler for new data events when using NAPI.  This does not need any locking
 * or protection from interrupts as data interrupts are off at this point and
 * other adapter interrupts do not interfere.
 */
static int t1_poll(struct net_device *dev, int *budget)
{
	struct adapter *adapter = dev->priv;
	int effective_budget = min(*budget, dev->quota);

	int work_done = process_responses(adapter, effective_budget);
	*budget -= work_done;
	dev->quota -= work_done;

	if (work_done >= effective_budget)
		return 1;

	__netif_rx_complete(dev);

	/*
	 * Because we don't atomically flush the following write it is
	 * possible that in very rare cases it can reach the device in a way
	 * that races with a new response being written plus an error interrupt
	 * causing the NAPI interrupt handler below to return unhandled status
	 * to the OS.  To protect against this would require flushing the write
	 * and doing both the write and the flush with interrupts off.  Way too
	 * expensive and unjustifiable given the rarity of the race.
	 */
	writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
	return 0;
}

/*
 * Returns true if the device is already scheduled for polling.
 */
static inline int napi_is_scheduled(struct net_device *dev)
{
	return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
}

/*
 * NAPI version of the main interrupt handler.
 */
static irqreturn_t t1_interrupt_napi(int irq, void *data, struct pt_regs *regs)
{
	int handled;
	struct adapter *adapter = data;
	struct sge *sge = adapter->sge;
	struct respQ *q = &adapter->sge->respQ;

	/*
	 * Clear the SGE_DATA interrupt first thing.  Normally the NAPI
	 * handler has control of the response queue and the interrupt handler
	 * can look at the queue reliably only once it knows NAPI is off.
	 * We can't wait that long to clear the SGE_DATA interrupt because we
	 * could race with t1_poll rearming the SGE interrupt, so we need to
	 * clear the interrupt speculatively and really early on.
	 */
	writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);

	spin_lock(&adapter->async_lock);
	if (!napi_is_scheduled(sge->netdev)) {
		struct respQ_e *e = &q->entries[q->cidx];

		if (e->GenerationBit == q->genbit) {
			if (e->DataValid ||
			    process_pure_responses(adapter, e)) {
				if (likely(napi_schedule_prep(sge->netdev)))
					__netif_rx_schedule(sge->netdev);
				else
					printk(KERN_CRIT
					       "NAPI schedule failure!\n");
			} else
				writel(q->cidx, adapter->regs + A_SG_SLEEPING);
			handled = 1;
			goto unlock;
		} else
			writel(q->cidx, adapter->regs + A_SG_SLEEPING);
	} else
		if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA)
			printk(KERN_ERR "data interrupt while NAPI running\n");

	handled = t1_slow_intr_handler(adapter);
	if (!handled)
		sge->stats.unhandled_irqs++;
unlock:
	spin_unlock(&adapter->async_lock);
	return IRQ_RETVAL(handled != 0);
}
| 1268 | |
| 1269 | /* |
| 1270 | * Main interrupt handler, optimized assuming that we took a 'DATA' |
| 1271 | * interrupt. |
| 1272 | * |
| 1273 | * 1. Clear the interrupt |
| 1274 | * 2. Loop while we find valid descriptors and process them; accumulate |
| 1275 | * information that can be processed after the loop |
| 1276 | * 3. Tell the SGE at which index we stopped processing descriptors |
| 1277 | * 4. Bookkeeping: free TX buffers, ring the doorbell if there are any |
| 1278 | *    outstanding TX buffers waiting, replenish RX buffers, and potentially |
| 1279 | *    re-enable upper layers that were turned off for lack of TX resources, |
| 1280 | *    now that resources are available again. |
| 1281 | * 5. If we took an interrupt but found no valid respQ descriptors, we let |
| 1282 | *    the slow_intr_handler run and do error handling. |
| 1283 | */ |
| 1284 | static irqreturn_t t1_interrupt(int irq, void *cookie, struct pt_regs *regs) |
| 1285 | { |
| 1286 | int work_done; |
| 1287 | struct respQ_e *e; |
| 1288 | struct adapter *adapter = cookie; |
| 1289 | struct respQ *Q = &adapter->sge->respQ; |
| 1290 | |
| 1291 | spin_lock(&adapter->async_lock); |
| 1292 | e = &Q->entries[Q->cidx]; |
| 1293 | prefetch(e); |
| 1294 | |
| 1295 | writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); |
| 1296 | |
| 1297 | if (likely(e->GenerationBit == Q->genbit)) |
| 1298 | work_done = process_responses(adapter, -1); |
| 1299 | else |
| 1300 | work_done = t1_slow_intr_handler(adapter); |
| 1301 | |
| 1302 | /* |
| 1303 | * The unconditional clearing of the PL_CAUSE above may have raced |
| 1304 | * with DMA completion and the corresponding generation of a response |
| 1305 | * to cause us to miss the resulting data interrupt. The next write |
| 1306 | * is also unconditional to recover the missed interrupt and render |
| 1307 | * this race harmless. |
| 1308 | */ |
| 1309 | writel(Q->cidx, adapter->regs + A_SG_SLEEPING); |
| 1310 | |
| 1311 | if (!work_done) |
| 1312 | adapter->sge->stats.unhandled_irqs++; |
| 1313 | spin_unlock(&adapter->async_lock); |
| 1314 | return IRQ_RETVAL(work_done != 0); |
| 1315 | } |
| 1316 | |
| 1317 | intr_handler_t t1_select_intr_handler(adapter_t *adapter) |
| 1318 | { |
| 1319 | return adapter->params.sge.polling ? t1_interrupt_napi : t1_interrupt; |
| 1320 | } |
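| |  |
| | /* |
| | * Illustrative sketch only: the handler selected above is what the |
| | * driver's open path is expected to hand to request_irq(); the actual |
| | * call site lives outside this file, so the names here are assumptions: |
| | * |
| | *	err = request_irq(adapter->pdev->irq, |
| | *			  t1_select_intr_handler(adapter), SA_SHIRQ, |
| | *			  adapter->name, adapter); |
| | */ |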
| 1321 | |
| 1322 | /* |
| 1323 | * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it. |
| 1324 | * |
| 1325 | * The code figures out how many entries the sk_buff will require in the |
| 1326 | * cmdQ and updates the cmdQ data structure with the state once the enqueue |
| 1327 | * has completed. Then, it doesn't access the global structure anymore, but |
| 1328 | * uses the corresponding fields on the stack. In conjunction with a spinlock |
| 1329 | * around that code, we can make the function reentrant without holding the |
| 1330 | * lock when we actually enqueue (which might be expensive, especially on |
| 1331 | * architectures with IO MMUs). |
| 1332 | * |
| 1333 | * This runs with softirqs disabled. |
| 1334 | */ |
Stephen Hemminger | aa84505 | 2005-12-14 14:38:44 -0800 | [diff] [blame^] | 1335 | static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter, |
| 1336 | unsigned int qid, struct net_device *dev) |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1337 | { |
| 1338 | struct sge *sge = adapter->sge; |
| 1339 | struct cmdQ *q = &sge->cmdQ[qid]; |
| 1340 | unsigned int credits, pidx, genbit, count; |
| 1341 | |
| 1342 | spin_lock(&q->lock); |
| 1343 | reclaim_completed_tx(sge, q); |
| 1344 | |
| 1345 | pidx = q->pidx; |
| 1346 | credits = q->size - q->in_use; |
| 1347 | count = 1 + skb_shinfo(skb)->nr_frags; |
| 1348 | |
| 1349 | { /* Ethernet packet */ |
| 1350 | if (unlikely(credits < count)) { |
| | /* Warn only if we ran out of descriptors while the queue was |
| | * still awake, then stop it until credits are reclaimed. */ |
| 1351 | if (!netif_queue_stopped(dev)) |
| 1352 | CH_ERR("%s: Tx ring full while queue awake!\n", |
| 1353 | adapter->name); |
| 1354 | netif_stop_queue(dev); |
Stephen Hemminger | aa84505 | 2005-12-14 14:38:44 -0800 | [diff] [blame^] | 1355 | set_bit(dev->if_port, &sge->stopped_tx_queues); |
| 1356 | sge->stats.cmdQ_full[3]++; |
| 1357 | spin_unlock(&q->lock); |
| 1358 | return NETDEV_TX_BUSY; |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1359 | } |
| 1360 | if (unlikely(credits - count < q->stop_thres)) { |
| 1361 | sge->stats.cmdQ_full[3]++; |
| 1362 | netif_stop_queue(dev); |
| 1363 | set_bit(dev->if_port, &sge->stopped_tx_queues); |
| 1364 | } |
| 1365 | } |
| 1366 | q->in_use += count; |
| 1367 | genbit = q->genbit; |
| 1368 | q->pidx += count; |
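| | /* On producer-index wrap-around, toggle the generation bit so the |
| | * hardware can tell newly written descriptors from stale ones. */ |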
| 1369 | if (q->pidx >= q->size) { |
| 1370 | q->pidx -= q->size; |
| 1371 | q->genbit ^= 1; |
| 1372 | } |
| 1373 | spin_unlock(&q->lock); |
| 1374 | |
| 1375 | write_tx_descs(adapter, skb, pidx, genbit, q); |
| 1376 | |
| 1377 | /* |
| 1378 | * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring |
| 1379 | * the doorbell if the Q is asleep. There is a natural race where |
| 1380 | * the hardware goes to sleep just after we check; in that case the |
| 1381 | * interrupt handler will detect the outstanding TX packet and ring |
| 1382 | * the doorbell for us. |
| 1383 | */ |
| 1384 | if (qid) |
| 1385 | doorbell_pio(adapter, F_CMDQ1_ENABLE); |
| 1386 | else { |
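| | /* |
| | * Doorbell coalescing for cmdQ0: LAST_PKT_DB records whether we rang |
| | * the doorbell for the latest packet, and RUNNING marks the queue as |
| | * active, so the reclaim/interrupt path can ring any doorbell we |
| | * skip here. |
| | */ |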
| 1387 | clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); |
| 1388 | if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { |
| 1389 | set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); |
| 1390 | writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); |
| 1391 | } |
| 1392 | } |
Stephen Hemminger | aa84505 | 2005-12-14 14:38:44 -0800 | [diff] [blame^] | 1393 | return NETDEV_TX_OK; |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1394 | } |
| 1395 | |
| 1396 | #define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14)) |
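| |  |
| | /* |
| | * Example: MK_ETH_TYPE_MSS(CPL_ETH_II, 1460) packs the MSS (1460) into |
| | * bits 13:0 and the Ethernet framing type into bits 15:14 of the 16-bit |
| | * eth_type_mss field handed to the hardware. |
| | */ |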
| 1397 | |
| 1398 | /* |
| 1399 | * eth_hdr_len - return the length of an Ethernet header |
| 1400 | * @data: pointer to the start of the Ethernet header |
| 1401 | * |
| 1402 | * Returns the length of an Ethernet header, including optional VLAN tag. |
| 1403 | */ |
| 1404 | static inline int eth_hdr_len(const void *data) |
| 1405 | { |
| 1406 | const struct ethhdr *e = data; |
| 1407 | |
| 1408 | return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN; |
| 1409 | } |
| 1410 | |
| 1411 | /* |
| 1412 | * Adds the CPL header to the sk_buff and passes it to t1_sge_tx. |
| 1413 | */ |
| 1414 | int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) |
| 1415 | { |
| 1416 | struct adapter *adapter = dev->priv; |
| 1417 | struct sge_port_stats *st = &adapter->sge->port_stats[dev->if_port]; |
| 1418 | struct sge *sge = adapter->sge; |
| 1419 | struct cpl_tx_pkt *cpl; |
| 1420 | |
| 1421 | #ifdef NETIF_F_TSO |
| 1422 | if (skb_shinfo(skb)->tso_size) { |
| 1423 | int eth_type; |
| 1424 | struct cpl_tx_pkt_lso *hdr; |
| 1425 | |
| 1426 | st->tso++; |
| 1427 | |
| 1428 | eth_type = skb->nh.raw - skb->data == ETH_HLEN ? |
| 1429 | CPL_ETH_II : CPL_ETH_II_VLAN; |
| 1430 | |
| 1431 | hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr)); |
| 1432 | hdr->opcode = CPL_TX_PKT_LSO; |
| 1433 | hdr->ip_csum_dis = hdr->l4_csum_dis = 0; |
| 1434 | hdr->ip_hdr_words = skb->nh.iph->ihl; |
| 1435 | hdr->tcp_hdr_words = skb->h.th->doff; |
| 1436 | hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type, |
| 1437 | skb_shinfo(skb)->tso_size)); |
| 1438 | hdr->len = htonl(skb->len - sizeof(*hdr)); |
| 1439 | cpl = (struct cpl_tx_pkt *)hdr; |
| 1440 | sge->stats.tx_lso_pkts++; |
| 1441 | } else |
| 1442 | #endif |
| 1443 | { |
| 1444 | /* |
| 1445 | * Packets shorter than ETH_HLEN can break the MAC, so drop |
| 1446 | * them early. We may also see oversized packets because some |
| 1447 | * parts of the kernel mishandle our unusual hard_header_len; |
| 1448 | * drop those too. |
| 1449 | */ |
| 1450 | if (unlikely(skb->len < ETH_HLEN || |
| 1451 | skb->len > dev->mtu + eth_hdr_len(skb->data))) { |
| 1452 | dev_kfree_skb_any(skb); |
Stephen Hemminger | aa84505 | 2005-12-14 14:38:44 -0800 | [diff] [blame^] | 1453 | return NETDEV_TX_OK; |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1454 | } |
| 1455 | |
| 1456 | /* |
| 1457 | * We are using a non-standard hard_header_len and some kernel |
| 1458 | * components, such as pktgen, do not handle it right. |
| 1459 | * Complain when this happens but try to fix things up. |
| 1460 | */ |
| 1461 | if (unlikely(skb_headroom(skb) < |
| 1462 | dev->hard_header_len - ETH_HLEN)) { |
| 1463 | struct sk_buff *orig_skb = skb; |
| 1464 | |
| 1465 | if (net_ratelimit()) |
| 1466 | printk(KERN_ERR "%s: inadequate headroom in " |
| 1467 | "Tx packet\n", dev->name); |
| 1468 | skb = skb_realloc_headroom(skb, sizeof(*cpl)); |
| 1469 | dev_kfree_skb_any(orig_skb); |
| 1470 | if (!skb) |
Stephen Hemminger | aa84505 | 2005-12-14 14:38:44 -0800 | [diff] [blame^] | 1471 | return NETDEV_TX_OK; |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1472 | } |
| 1473 | |
| 1474 | if (!(adapter->flags & UDP_CSUM_CAPABLE) && |
| 1475 | skb->ip_summed == CHECKSUM_HW && |
| 1476 | skb->nh.iph->protocol == IPPROTO_UDP) |
| 1477 | if (unlikely(skb_checksum_help(skb, 0))) { |
| 1478 | dev_kfree_skb_any(skb); |
Stephen Hemminger | aa84505 | 2005-12-14 14:38:44 -0800 | [diff] [blame^] | 1479 | return NETDEV_TX_OK; |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1480 | } |
| 1481 | |
| 1482 | /* Stash what should be the gratuitous ARP; we replay it later |
| 1483 | * to flush out stuck ESPI packets. |
| 1484 | */ |
| 1485 | if (unlikely(!adapter->sge->espibug_skb)) { |
| 1486 | if (skb->protocol == htons(ETH_P_ARP) && |
| 1487 | skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) { |
| 1488 | adapter->sge->espibug_skb = skb; |
| 1489 | /* We want to re-use this skb later, so |
| 1490 | * bump the reference count; the skb will |
| 1491 | * not be freed when the stack releases it. |
| 1492 | */ |
| 1493 | skb = skb_get(skb); |
| 1494 | } |
| 1495 | } |
| 1496 | |
| 1497 | cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl)); |
| 1498 | cpl->opcode = CPL_TX_PKT; |
| 1499 | cpl->ip_csum_dis = 1; /* SW calculates IP csum */ |
| 1500 | cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_HW ? 0 : 1; |
| 1501 | /* the length field isn't used so don't bother setting it */ |
| 1502 | |
| 1503 | st->tx_cso += (skb->ip_summed == CHECKSUM_HW); |
| 1504 | sge->stats.tx_do_cksum += (skb->ip_summed == CHECKSUM_HW); |
| 1505 | sge->stats.tx_reg_pkts++; |
| 1506 | } |
| 1507 | cpl->iff = dev->if_port; |
| 1508 | |
| 1509 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) |
| 1510 | if (adapter->vlan_grp && vlan_tx_tag_present(skb)) { |
| 1511 | cpl->vlan_valid = 1; |
| 1512 | cpl->vlan = htons(vlan_tx_tag_get(skb)); |
| 1513 | st->vlan_insert++; |
| 1514 | } else |
| 1515 | #endif |
| 1516 | cpl->vlan_valid = 0; |
| 1517 | |
| 1518 | dev->trans_start = jiffies; |
| 1519 | return t1_sge_tx(skb, adapter, 0, dev); |
| 1520 | } |
| 1521 | |
| 1522 | /* |
| 1523 | * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled. |
| 1524 | */ |
| 1525 | static void sge_tx_reclaim_cb(unsigned long data) |
| 1526 | { |
| 1527 | int i; |
| 1528 | struct sge *sge = (struct sge *)data; |
| 1529 | |
| 1530 | for (i = 0; i < SGE_CMDQ_N; ++i) { |
| 1531 | struct cmdQ *q = &sge->cmdQ[i]; |
| 1532 | |
| 1533 | if (!spin_trylock(&q->lock)) |
| 1534 | continue; |
| 1535 | |
| 1536 | reclaim_completed_tx(sge, q); |
| 1537 | if (i == 0 && q->in_use) /* flush pending credits */ |
| 1538 | writel(F_CMDQ0_ENABLE, |
| 1539 | sge->adapter->regs + A_SG_DOORBELL); |
| 1540 | |
| 1541 | spin_unlock(&q->lock); |
| 1542 | } |
| 1543 | mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); |
| 1544 | } |
| 1545 | |
| 1546 | /* |
| 1547 | * Propagate changes of the SGE coalescing parameters to the HW. |
| 1548 | */ |
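| | /* |
| | * Worked example (illustrative, assuming a 125 MHz core clock, i.e. |
| | * core_ticks_per_usec() == 125): rx_coalesce_usecs = 50 programs the |
| | * interrupt holdoff timer to 50 * 125 = 6250 core ticks. |
| | */ |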
| 1549 | int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p) |
| 1550 | { |
| 1551 | sge->netdev->poll = t1_poll; |
| 1552 | sge->fixed_intrtimer = p->rx_coalesce_usecs * |
| 1553 | core_ticks_per_usec(sge->adapter); |
| 1554 | writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER); |
| 1555 | return 0; |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1556 | } |
| 1557 | |
| 1558 | /* |
| 1559 | * Allocates both RX and TX resources and configures the SGE. However, |
| 1560 | * the hardware is not enabled yet. |
| 1561 | */ |
| 1562 | int t1_sge_configure(struct sge *sge, struct sge_params *p) |
| 1563 | { |
| 1564 | if (alloc_rx_resources(sge, p)) |
| 1565 | return -ENOMEM; |
| 1566 | if (alloc_tx_resources(sge, p)) { |
| 1567 | free_rx_resources(sge); |
| 1568 | return -ENOMEM; |
| 1569 | } |
| 1570 | configure_sge(sge, p); |
| 1571 | |
| 1572 | /* |
| 1573 | * Now that we have sized the free lists calculate the payload |
| 1574 | * capacity of the large buffers. Other parts of the driver use |
| 1575 | * this to set the max offload coalescing size so that RX packets |
| 1576 | * do not overflow our large buffers. |
| 1577 | */ |
| 1578 | p->large_buf_capacity = jumbo_payload_capacity(sge); |
| 1579 | return 0; |
| 1580 | } |
| 1581 | |
| 1582 | /* |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1583 | * Disables the DMA engine. |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1584 | */ |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1585 | void t1_sge_stop(struct sge *sge) |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1586 | { |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1587 | writel(0, sge->adapter->regs + A_SG_CONTROL); |
| 1588 | (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ |
| 1589 | if (is_T2(sge->adapter)) |
| 1590 | del_timer_sync(&sge->espibug_timer); |
| 1591 | del_timer_sync(&sge->tx_reclaim_timer); |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1592 | } |
| 1593 | |
| 1594 | /* |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1595 | * Enables the DMA engine. |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1596 | */ |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1597 | void t1_sge_start(struct sge *sge) |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1598 | { |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1599 | refill_free_list(sge, &sge->freelQ[0]); |
| 1600 | refill_free_list(sge, &sge->freelQ[1]); |
| 1601 | |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1602 | writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL); |
| 1603 | doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE); |
| 1604 | (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1605 | |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1606 | mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1607 | |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1608 | if (is_T2(sge->adapter)) |
| 1609 | mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1610 | } |
| 1611 | |
| 1612 | /* |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1613 | * Callback for the T2 ESPI 'stuck packet feature' workaround. |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1614 | */ |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1615 | static void espibug_workaround(unsigned long data) |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1616 | { |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1617 | struct adapter *adapter = (struct adapter *)data; |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1618 | struct sge *sge = adapter->sge; |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1619 | |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1620 | if (netif_running(adapter->port[0].dev)) { |
| 1621 | struct sk_buff *skb = sge->espibug_skb; |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1622 | |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1623 | u32 seop = t1_espi_get_mon(adapter, 0x930, 0); |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1624 | |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1625 | if ((seop & 0xfff0fff) == 0xfff && skb) { |
| 1626 | if (!skb->cb[0]) { |
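| | /* 00:07:43 is the Chelsio OUI */ |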
| 1627 | u8 ch_mac_addr[ETH_ALEN] = |
| 1628 | {0x0, 0x7, 0x43, 0x0, 0x0, 0x0}; |
| 1629 | memcpy(skb->data + sizeof(struct cpl_tx_pkt), |
| 1630 | ch_mac_addr, ETH_ALEN); |
| 1631 | memcpy(skb->data + skb->len - 10, ch_mac_addr, |
| 1632 | ETH_ALEN); |
| 1633 | skb->cb[0] = 0xff; |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1634 | } |
| 1635 | |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1636 | /* bump the reference count to avoid freeing of the |
| 1637 | * skb once the DMA has completed. |
| 1638 | */ |
| 1639 | skb = skb_get(skb); |
| 1640 | t1_sge_tx(skb, adapter, 0, adapter->port[0].dev); |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1641 | } |
| 1642 | } |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1643 | mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1644 | } |
| 1645 | |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1646 | /* |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1647 | * Creates a t1_sge structure and returns suggested resource parameters. |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1648 | */ |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1649 | struct sge * __devinit t1_sge_create(struct adapter *adapter, |
| 1650 | struct sge_params *p) |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1651 | { |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1652 | struct sge *sge = kmalloc(sizeof(*sge), GFP_KERNEL); |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1653 | |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1654 | if (!sge) |
| 1655 | return NULL; |
| 1656 | memset(sge, 0, sizeof(*sge)); |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1657 | |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1658 | sge->adapter = adapter; |
| 1659 | sge->netdev = adapter->port[0].dev; |
| 1660 | sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2; |
| 1661 | sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; |
| 1662 | |
| 1663 | init_timer(&sge->tx_reclaim_timer); |
| 1664 | sge->tx_reclaim_timer.data = (unsigned long)sge; |
| 1665 | sge->tx_reclaim_timer.function = sge_tx_reclaim_cb; |
| 1666 | |
| 1667 | if (is_T2(sge->adapter)) { |
| 1668 | init_timer(&sge->espibug_timer); |
| 1669 | sge->espibug_timer.function = espibug_workaround; |
| 1670 | sge->espibug_timer.data = (unsigned long)sge->adapter; |
| 1671 | sge->espibug_timeout = 1; |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1672 | } |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1673 |  |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1675 | p->cmdQ_size[0] = SGE_CMDQ0_E_N; |
| 1676 | p->cmdQ_size[1] = SGE_CMDQ1_E_N; |
| 1677 | p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE; |
| 1678 | p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE; |
| 1679 | p->rx_coalesce_usecs = 50; |
| 1680 | p->coalesce_enable = 0; |
| 1681 | p->sample_interval_usecs = 0; |
| 1682 | p->polling = 0; |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1683 | |
Scott Bardone | 559fb51 | 2005-06-23 01:40:19 -0400 | [diff] [blame] | 1684 | return sge; |
Christoph Lameter | 8199d3a | 2005-03-30 13:34:31 -0800 | [diff] [blame] | 1685 | } |