blob: 0013642903eecda0e8226579176de6f2f93babc5 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 drivers/net/tulip/interrupt.c
3
Linus Torvalds1da177e2005-04-16 15:20:36 -07004 Copyright 2000,2001 The Linux Kernel Team
5 Written/copyright 1994-2001 by Donald Becker.
6
7 This software may be used and distributed according to the terms
8 of the GNU General Public License, incorporated herein by reference.
9
10 Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
Grant Grundler78a65512008-06-05 00:38:55 -060011 for more information on this driver.
12 Please submit bugs to http://bugzilla.kernel.org/ .
Linus Torvalds1da177e2005-04-16 15:20:36 -070013
14*/
15
16#include <linux/pci.h>
17#include "tulip.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#include <linux/etherdevice.h>
19
/* RX packets shorter than this are copied into a freshly allocated skb
 * instead of handing the ring buffer itself up the stack (see the
 * copybreak test in tulip_rx()/tulip_poll()).  Value is assigned
 * elsewhere in the driver — presumably a module parameter; confirm. */
int tulip_rx_copybreak;
/* Upper bound on iterations of the tulip_interrupt() work loop before the
 * handler bails out (work_count in tulip_interrupt()).  Assigned elsewhere. */
unsigned int tulip_max_interrupt_work;
22
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

/* CSR11 (interrupt mitigation control) values, indexed 0..MIT_SIZE.
 * Only entries 0 (mitigation off) and MIT_TABLE (max) are used; see the
 * mitigation logic at the end of tulip_poll(). */
static unsigned int mit_table[MIT_SIZE+1] =
{
	/* CSR11 21143 hardware Mitigation Control Interrupt.
	   We use only RX mitigation; other techniques are used for
	   TX intr. mitigation.

	   Bit layout of CSR11:
	   31    Cycle Size (timer control)
	   30:27 TX timer in 16 * Cycle size
	   26:24 TX No pkts before Int.
	   23:20 RX timer in Cycle size
	   19:17 RX No pkts before Int.
	   16    Continuous Mode (CM)
	*/

	0x0,        /* IM disabled */
	0x80150000, /* RX time = 1, RX pkts = 2, CM = 1 */
	0x80150000,
	0x80270000,
	0x80370000,
	0x80490000,
	0x80590000,
	0x80690000,
	0x807B0000,
	0x808B0000,
	0x809D0000,
	0x80AD0000,
	0x80BD0000,
	0x80CF0000,
	0x80DF0000,
//	0x80FF0000  /* RX time = 16, RX pkts = 7, CM = 1 */
	0x80F10000  /* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif
60
61
62int tulip_refill_rx(struct net_device *dev)
63{
64 struct tulip_private *tp = netdev_priv(dev);
65 int entry;
66 int refilled = 0;
67
68 /* Refill the Rx ring buffers. */
69 for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
70 entry = tp->dirty_rx % RX_RING_SIZE;
71 if (tp->rx_buffers[entry].skb == NULL) {
72 struct sk_buff *skb;
73 dma_addr_t mapping;
74
75 skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
76 if (skb == NULL)
77 break;
78
David S. Miller689be432005-06-28 15:25:31 -070079 mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
Linus Torvalds1da177e2005-04-16 15:20:36 -070080 PCI_DMA_FROMDEVICE);
81 tp->rx_buffers[entry].mapping = mapping;
82
83 skb->dev = dev; /* Mark as being used by this device. */
84 tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
85 refilled++;
86 }
87 tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
88 }
89 if(tp->chip_id == LC82C168) {
90 if(((ioread32(tp->base_addr + CSR5)>>17)&0x07) == 4) {
91 /* Rx stopped due to out of buffers,
92 * restart it
93 */
94 iowrite32(0x01, tp->base_addr + CSR2);
95 }
96 }
97 return refilled;
98}
99
100#ifdef CONFIG_TULIP_NAPI
101
/* OOM recovery timer callback.  Armed (jiffies+1) by tulip_poll() when the
 * RX ring could not be refilled; simply reschedules NAPI polling so the
 * refill can be retried.  'data' carries the net_device pointer, per the
 * old-style timer callback convention. */
void oom_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct tulip_private *tp = netdev_priv(dev);
	napi_schedule(&tp->napi);
}
108
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700109int tulip_poll(struct napi_struct *napi, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700110{
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700111 struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
112 struct net_device *dev = tp->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113 int entry = tp->cur_rx % RX_RING_SIZE;
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700114 int work_done = 0;
115#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116 int received = 0;
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700117#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118
Linus Torvalds1da177e2005-04-16 15:20:36 -0700119#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
120
121/* that one buffer is needed for mit activation; or might be a
122 bug in the ring buffer code; check later -- JHS*/
123
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700124 if (budget >=RX_RING_SIZE) budget--;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700125#endif
126
127 if (tulip_debug > 4)
Joe Perchesabe02af2010-01-28 20:59:22 +0000128 printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
129 entry, tp->rx_ring[entry].status);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700130
131 do {
132 if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
Joe Perchesabe02af2010-01-28 20:59:22 +0000133 printk(KERN_DEBUG " In tulip_poll(), hardware disappeared\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134 break;
135 }
136 /* Acknowledge current RX interrupt sources. */
137 iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400138
139
Linus Torvalds1da177e2005-04-16 15:20:36 -0700140 /* If we own the next entry, it is a new packet. Send it up. */
141 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
142 s32 status = le32_to_cpu(tp->rx_ring[entry].status);
Tomasz Lemiech1f8ae0a2009-03-13 15:43:38 -0700143 short pkt_len;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400144
Linus Torvalds1da177e2005-04-16 15:20:36 -0700145 if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
146 break;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400147
Linus Torvalds1da177e2005-04-16 15:20:36 -0700148 if (tulip_debug > 5)
Joe Perchesabe02af2010-01-28 20:59:22 +0000149 printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -0700150 dev->name, entry, status);
Stephen Hemmingerc6a1b622008-01-07 00:23:04 -0800151
152 if (++work_done >= budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700153 goto not_done;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400154
Tomasz Lemiech1f8ae0a2009-03-13 15:43:38 -0700155 /*
156 * Omit the four octet CRC from the length.
157 * (May not be considered valid until we have
158 * checked status for RxLengthOver2047 bits)
159 */
160 pkt_len = ((status >> 16) & 0x7ff) - 4;
161
162 /*
163 * Maximum pkt_len is 1518 (1514 + vlan header)
164 * Anything higher than this is always invalid
165 * regardless of RxLengthOver2047 bits
166 */
167
168 if ((status & (RxLengthOver2047 |
169 RxDescCRCError |
170 RxDescCollisionSeen |
171 RxDescRunt |
172 RxDescDescErr |
Joe Perches8e95a202009-12-03 07:58:21 +0000173 RxWholePkt)) != RxWholePkt ||
174 pkt_len > 1518) {
Tomasz Lemiech1f8ae0a2009-03-13 15:43:38 -0700175 if ((status & (RxLengthOver2047 |
176 RxWholePkt)) != RxWholePkt) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700177 /* Ingore earlier buffers. */
178 if ((status & 0xffff) != 0x7fff) {
179 if (tulip_debug > 1)
Joe Perchesabe02af2010-01-28 20:59:22 +0000180 dev_warn(&dev->dev,
181 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
182 status);
Eric Dumazet1a18aba2010-09-03 03:52:43 +0000183 dev->stats.rx_length_errors++;
184 }
Tomasz Lemiech1f8ae0a2009-03-13 15:43:38 -0700185 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700186 /* There was a fatal error. */
187 if (tulip_debug > 2)
Joe Perchesabe02af2010-01-28 20:59:22 +0000188 printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -0700189 dev->name, status);
Eric Dumazet1a18aba2010-09-03 03:52:43 +0000190 dev->stats.rx_errors++; /* end of a packet.*/
191 if (pkt_len > 1518 ||
192 (status & RxDescRunt))
193 dev->stats.rx_length_errors++;
Tomasz Lemiech1f8ae0a2009-03-13 15:43:38 -0700194
Eric Dumazet1a18aba2010-09-03 03:52:43 +0000195 if (status & 0x0004)
196 dev->stats.rx_frame_errors++;
197 if (status & 0x0002)
198 dev->stats.rx_crc_errors++;
199 if (status & 0x0001)
200 dev->stats.rx_fifo_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700201 }
202 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203 struct sk_buff *skb;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400204
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205 /* Check if the packet is long enough to accept without copying
206 to a minimally-sized skbuff. */
Joe Perches8e95a202009-12-03 07:58:21 +0000207 if (pkt_len < tulip_rx_copybreak &&
208 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209 skb_reserve(skb, 2); /* 16 byte align the IP header */
210 pci_dma_sync_single_for_cpu(tp->pdev,
211 tp->rx_buffers[entry].mapping,
212 pkt_len, PCI_DMA_FROMDEVICE);
213#if ! defined(__alpha__)
David S. Miller8c7b7fa2007-07-10 22:08:12 -0700214 skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
215 pkt_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700216 skb_put(skb, pkt_len);
217#else
218 memcpy(skb_put(skb, pkt_len),
David S. Miller689be432005-06-28 15:25:31 -0700219 tp->rx_buffers[entry].skb->data,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700220 pkt_len);
221#endif
222 pci_dma_sync_single_for_device(tp->pdev,
223 tp->rx_buffers[entry].mapping,
224 pkt_len, PCI_DMA_FROMDEVICE);
225 } else { /* Pass up the skb already on the Rx ring. */
226 char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
227 pkt_len);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400228
Linus Torvalds1da177e2005-04-16 15:20:36 -0700229#ifndef final_version
230 if (tp->rx_buffers[entry].mapping !=
231 le32_to_cpu(tp->rx_ring[entry].buffer1)) {
Joe Perchesabe02af2010-01-28 20:59:22 +0000232 dev_err(&dev->dev,
233 "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
234 le32_to_cpu(tp->rx_ring[entry].buffer1),
235 (unsigned long long)tp->rx_buffers[entry].mapping,
236 skb->head, temp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700237 }
238#endif
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400239
Linus Torvalds1da177e2005-04-16 15:20:36 -0700240 pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
241 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400242
Linus Torvalds1da177e2005-04-16 15:20:36 -0700243 tp->rx_buffers[entry].skb = NULL;
244 tp->rx_buffers[entry].mapping = 0;
245 }
246 skb->protocol = eth_type_trans(skb, dev);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400247
Linus Torvalds1da177e2005-04-16 15:20:36 -0700248 netif_receive_skb(skb);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400249
Eric Dumazet1a18aba2010-09-03 03:52:43 +0000250 dev->stats.rx_packets++;
251 dev->stats.rx_bytes += pkt_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700252 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700253#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
254 received++;
255#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700256
257 entry = (++tp->cur_rx) % RX_RING_SIZE;
258 if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
259 tulip_refill_rx(dev);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400260
Linus Torvalds1da177e2005-04-16 15:20:36 -0700261 }
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400262
Linus Torvalds1da177e2005-04-16 15:20:36 -0700263 /* New ack strategy... irq does not ack Rx any longer
264 hopefully this helps */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400265
Linus Torvalds1da177e2005-04-16 15:20:36 -0700266 /* Really bad things can happen here... If new packet arrives
267 * and an irq arrives (tx or just due to occasionally unset
268 * mask), it will be acked by irq handler, but new thread
269 * is not scheduled. It is major hole in design.
270 * No idea how to fix this if "playing with fire" will fail
271 * tomorrow (night 011029). If it will not fail, we won
272 * finally: amount of IO did not increase at all. */
273 } while ((ioread32(tp->base_addr + CSR5) & RxIntr));
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400274
Linus Torvalds1da177e2005-04-16 15:20:36 -0700275 #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400276
Linus Torvalds1da177e2005-04-16 15:20:36 -0700277 /* We use this simplistic scheme for IM. It's proven by
278 real life installations. We can have IM enabled
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400279 continuesly but this would cause unnecessary latency.
280 Unfortunely we can't use all the NET_RX_* feedback here.
281 This would turn on IM for devices that is not contributing
282 to backlog congestion with unnecessary latency.
283
Michael Opdenacker59c51592007-05-09 08:57:56 +0200284 We monitor the device RX-ring and have:
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400285
Linus Torvalds1da177e2005-04-16 15:20:36 -0700286 HW Interrupt Mitigation either ON or OFF.
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400287
288 ON: More then 1 pkt received (per intr.) OR we are dropping
Linus Torvalds1da177e2005-04-16 15:20:36 -0700289 OFF: Only 1 pkt received
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400290
Linus Torvalds1da177e2005-04-16 15:20:36 -0700291 Note. We only use min and max (0, 15) settings from mit_table */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400292
293
Linus Torvalds1da177e2005-04-16 15:20:36 -0700294 if( tp->flags & HAS_INTR_MITIGATION) {
295 if( received > 1 ) {
296 if( ! tp->mit_on ) {
297 tp->mit_on = 1;
298 iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
299 }
300 }
301 else {
302 if( tp->mit_on ) {
303 tp->mit_on = 0;
304 iowrite32(0, tp->base_addr + CSR11);
305 }
306 }
307 }
308
309#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400310
Linus Torvalds1da177e2005-04-16 15:20:36 -0700311 tulip_refill_rx(dev);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400312
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313 /* If RX ring is not full we are out of memory. */
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700314 if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
315 goto oom;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400316
Linus Torvalds1da177e2005-04-16 15:20:36 -0700317 /* Remove us from polling list and enable RX intr. */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400318
Ben Hutchings288379f2009-01-19 16:43:59 -0800319 napi_complete(napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700320 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400321
Linus Torvalds1da177e2005-04-16 15:20:36 -0700322 /* The last op happens after poll completion. Which means the following:
323 * 1. it can race with disabling irqs in irq handler
324 * 2. it can race with dise/enabling irqs in other poll threads
325 * 3. if an irq raised after beginning loop, it will be immediately
326 * triggered here.
327 *
328 * Summarizing: the logic results in some redundant irqs both
329 * due to races in masking and due to too late acking of already
330 * processed irqs. But it must not result in losing events.
331 */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400332
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700333 return work_done;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400334
Linus Torvalds1da177e2005-04-16 15:20:36 -0700335 not_done:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700336 if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
337 tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
338 tulip_refill_rx(dev);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400339
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700340 if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
341 goto oom;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400342
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700343 return work_done;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400344
Linus Torvalds1da177e2005-04-16 15:20:36 -0700345 oom: /* Executed with RX ints disabled */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400346
Linus Torvalds1da177e2005-04-16 15:20:36 -0700347 /* Start timer, stop polling, but do not enable rx interrupts. */
348 mod_timer(&tp->oom_timer, jiffies+1);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400349
Linus Torvalds1da177e2005-04-16 15:20:36 -0700350 /* Think: timer_pending() was an explicit signature of bug.
351 * Timer can be pending now but fired and completed
Ben Hutchings288379f2009-01-19 16:43:59 -0800352 * before we did napi_complete(). See? We would lose it. */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400353
Linus Torvalds1da177e2005-04-16 15:20:36 -0700354 /* remove ourselves from the polling list */
Ben Hutchings288379f2009-01-19 16:43:59 -0800355 napi_complete(napi);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400356
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700357 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700358}
359
360#else /* CONFIG_TULIP_NAPI */
361
/* Non-NAPI RX path, called from tulip_interrupt(): drain completed RX
 * descriptors (bounded by the free space in the ring) and hand packets
 * up via netif_rx().  Returns the number of packets received.  Refilling
 * of the ring is done by the caller (tulip_refill_rx). */
static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	/* Never consume more entries than are currently refillable. */
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
		       entry, tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);
		short pkt_len;

		if (tulip_debug > 5)
			printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
			       dev->name, entry, status);
		if (--rx_work_limit < 0)
			break;

		/*
		  Omit the four octet CRC from the length.
		  (May not be considered valid until we have
		  checked status for RxLengthOver2047 bits)
		*/
		pkt_len = ((status >> 16) & 0x7ff) - 4;
		/*
		  Maximum pkt_len is 1518 (1514 + vlan header)
		  Anything higher than this is always invalid
		  regardless of RxLengthOver2047 bits
		*/

		if ((status & (RxLengthOver2047 |
			       RxDescCRCError |
			       RxDescCollisionSeen |
			       RxDescRunt |
			       RxDescDescErr |
			       RxWholePkt)) != RxWholePkt ||
		    pkt_len > 1518) {
			if ((status & (RxLengthOver2047 |
				       RxWholePkt)) != RxWholePkt) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						dev_warn(&dev->dev,
							 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
							 status);
					dev->stats.rx_length_errors++;
				}
			} else {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
					       dev->name, status);
				dev->stats.rx_errors++; /* end of a packet.*/
				if (pkt_len > 1518 ||
				    (status & RxDescRunt))
					dev->stats.rx_length_errors++;
				if (status & 0x0004)
					dev->stats.rx_frame_errors++;
				if (status & 0x0002)
					dev->stats.rx_crc_errors++;
				if (status & 0x0001)
					dev->stats.rx_fifo_errors++;
			}
		} else {
			struct sk_buff *skb;

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(tp->pdev,
							    tp->rx_buffers[entry].mapping,
							    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
							pkt_len);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->data,
				       pkt_len);
#endif
				pci_dma_sync_single_for_device(tp->pdev,
							       tp->rx_buffers[entry].mapping,
							       pkt_len, PCI_DMA_FROMDEVICE);
			} else {	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				/* Sanity check: descriptor and shadow copy of the
				 * DMA address must agree. */
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					dev_err(&dev->dev,
						"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %Lx %p / %p\n",
						le32_to_cpu(tp->rx_ring[entry].buffer1),
						(long long)tp->rx_buffers[entry].mapping,
						skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
484#endif /* CONFIG_TULIP_NAPI */
485
/* Service a PHY (link-status) interrupt.  Only the HPPA platform routes a
 * PHY interrupt through here; elsewhere this is a no-op.  Returns 1 if a
 * link-status change was handled, 0 otherwise. */
static inline unsigned int phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
	struct tulip_private *tp = netdev_priv(dev);
	int link = ioread32(tp->base_addr + CSR12) & 0xff;

	/* No change since last time: nothing to do. */
	if (link == tp->csr12_shadow)
		return 0;

	/* Acknowledge the interrupt and remember the new status. */
	iowrite32(link | 0x02, tp->base_addr + CSR12);
	tp->csr12_shadow = link;

	/* React to the link change under the device lock. */
	spin_lock(&tp->lock);
	tulip_check_duplex(dev);
	spin_unlock(&tp->lock);

	/* Clear the irq ack bit again. */
	iowrite32(link & ~0x02, tp->base_addr + CSR12);

	return 1;
#else
	return 0;
#endif
}
509
/* The interrupt handler.  In NAPI builds it hands RX work to tulip_poll()
 * and only reaps TX completions and error conditions here; in non-NAPI
 * builds it also drains RX directly.  Loops until no interesting CSR5
 * bits remain, up to tulip_max_interrupt_work iterations. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;		/* count of "other interrupt" events handled */
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
	int rxd = 0;		/* set once RX has been deferred to NAPI */
#else
	int entry;
#endif
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;

	/* Let's see whether the interrupt really is for us */
	csr5 = ioread32(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		handled = phy_interrupt (dev);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;

	do {

#ifdef CONFIG_TULIP_NAPI

		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask RX intrs and add the device to poll list. */
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
			napi_schedule(&tp->napi);

			if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
				break;
		}

		/* Acknowledge the interrupt sources we handle here ASAP
		   the poll function does Rx and RxNoBuf acking */

		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);


		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

#endif /*  CONFIG_TULIP_NAPI */

		if (tulip_debug > 4)
			printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x\n",
			       dev->name, csr5, ioread32(ioaddr + CSR5));


		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			/* Reap completed TX descriptors. */
			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
				 dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;			/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
							 tp->tx_buffers[entry].mapping,
							 sizeof(tp->setup_frame),
							 PCI_DMA_TODEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was an major error, log it. */
#ifndef final_version
					if (tulip_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
						       dev->name, status);
#endif
					dev->stats.tx_errors++;
					if (status & 0x4104)
						dev->stats.tx_aborted_errors++;
					if (status & 0x0C00)
						dev->stats.tx_carrier_errors++;
					if (status & 0x0200)
						dev->stats.tx_window_errors++;
					if (status & 0x0002)
						dev->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						dev->stats.tx_heartbeat_errors++;
				} else {
					dev->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					dev->stats.collisions += (status >> 3) & 15;
					dev->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				dev_err(&dev->dev,
					"Out-of-sync dirty pointer, %d vs. %d\n",
					dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			/* Restart the queue once at least two slots are free. */
			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					dev_warn(&dev->dev,
						 "The transmitter stopped.  CSR5 is %x, CSR6 %x, new CSR6 %x\n",
						 csr5, ioread32(ioaddr + CSR6),
						 tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}

		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;		/* hardware disappeared */
			if (csr5 & TxJabber)
				dev->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;  /* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				iowrite32(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
					iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {		/* Missed a Rx frame. */
				dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				dev->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SystemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error.  The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				dev_err(&dev->dev,
					"(%lu) System Error occurred (%d)\n",
					tp->nir, error);
			}
			/* Clear all error sources, included undocumented ones! */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {

			if (tulip_debug > 2)
				dev_err(&dev->dev,
					"Re-enabling interrupts, %08x\n",
					csr5);
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
					 csr5, tp->nir, tx, rx, oi);

			/* Acknowledge all interrupt sources. */
			iowrite32(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
				/* Josip Loncaric at ICASE did extensive experimentation
				   to develop a good interrupt mitigation setting.*/
				iowrite32(0x8b240000, ioaddr + CSR11);
			} else if (tp->chip_id == LC82C168) {
				/* the LC82C168 doesn't have a hw timer.*/
				iowrite32(0x00, ioaddr + CSR7);
				mod_timer(&tp->timer, RUN_AT(HZ/50));
			} else {
				/* Mask all interrupting sources, set timer to
				   re-enable. */
				iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
				iowrite32(0x0012, ioaddr + CSR11);
			}
			break;
		}

		work_count--;
		if (work_count == 0)
			break;

		csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
		if (rxd)
			csr5 &= ~RxPollInt;	/* RX is NAPI's job now */
	} while ((csr5 & (TxNoBuf |
			  TxDied |
			  TxIntr |
			  TimerInt |
			  /* Abnormal intr. */
			  RxDied |
			  TxFIFOUnderflow |
			  TxJabber |
			  TPLnkFail |
			  SystemError )) != 0);
#else
	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

	tulip_refill_rx(dev);

	/* check if the card is in suspend mode */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
		if (tulip_debug > 1)
			dev_warn(&dev->dev,
				 "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
				 tp->nir, tp->cur_rx, tp->ttimer, rx);
		if (tp->chip_id == LC82C168) {
			iowrite32(0x00, ioaddr + CSR7);
			mod_timer(&tp->timer, RUN_AT(HZ/50));
		} else {
			if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
				if (tulip_debug > 1)
					dev_warn(&dev->dev,
						 "in rx suspend mode: (%lu) set timer\n",
						 tp->nir);
				iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
					ioaddr + CSR7);
				iowrite32(TimerInt, ioaddr + CSR5);
				iowrite32(12, ioaddr + CSR11);
				tp->ttimer = 1;
			}
		}
	}
#endif /* CONFIG_TULIP_NAPI */

	/* Fold the hardware missed-frame counter (CSR8) into stats. */
	if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
		dev->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}

	if (tulip_debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#04x\n",
		       dev->name, ioread32(ioaddr + CSR5));

	return IRQ_HANDLED;
}