blob: 1faf7a4d72024da4ffd864dfddb99502edd6fcc6 [file] [log] [blame]
/*
	drivers/net/tulip/interrupt.c

	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
	for more information on this driver.

	Please submit bugs to http://bugzilla.kernel.org/ .
*/
15
16#include <linux/pci.h>
17#include "tulip.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#include <linux/etherdevice.h>
19
/* Rx frames shorter than this are copied into a freshly allocated skb
 * (see the copybreak test in tulip_poll()/tulip_rx() below); presumably
 * configured as a module parameter elsewhere — confirm in tulip_core.c. */
int tulip_rx_copybreak;
/* Budget for the non-NAPI interrupt handler: tulip_interrupt() loops at
 * most this many times before bailing out (see work_count below);
 * presumably set as a module parameter — confirm in tulip_core.c. */
unsigned int tulip_max_interrupt_work;
22
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

/* CSR11 values for the 21143 hardware interrupt-mitigation register.
 * Only the first (0 = mitigation off) and last (MIT_TABLE = max) entries
 * are actually written by tulip_poll(); the intermediate steps are kept
 * for reference. */
static unsigned int mit_table[MIT_SIZE+1] =
{
	/* CRS11 21143 hardware Mitigation Control Interrupt
	   We use only RX mitigation; other techniques are used for
	   TX intr. mitigation.

	   31    Cycle Size (timer control)
	   30:27 TX timer in 16 * Cycle size
	   26:24 TX No pkts before Int.
	   23:20 RX timer in Cycle size
	   19:17 RX No pkts before Int.
	   16    Continues Mode (CM)
	*/

	0x0,            /* IM disabled */
	0x80150000,     /* RX time = 1, RX pkts = 2, CM = 1 */
	0x80150000,
	0x80270000,
	0x80370000,
	0x80490000,
	0x80590000,
	0x80690000,
	0x807B0000,
	0x808B0000,
	0x809D0000,
	0x80AD0000,
	0x80BD0000,
	0x80CF0000,
	0x80DF0000,
//	0x80FF0000      /* RX time = 16, RX pkts = 7, CM = 1 */
	0x80F10000      /* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif
60
61
62int tulip_refill_rx(struct net_device *dev)
63{
64 struct tulip_private *tp = netdev_priv(dev);
65 int entry;
66 int refilled = 0;
67
68 /* Refill the Rx ring buffers. */
69 for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
70 entry = tp->dirty_rx % RX_RING_SIZE;
71 if (tp->rx_buffers[entry].skb == NULL) {
72 struct sk_buff *skb;
73 dma_addr_t mapping;
74
75 skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
76 if (skb == NULL)
77 break;
78
David S. Miller689be432005-06-28 15:25:31 -070079 mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
Linus Torvalds1da177e2005-04-16 15:20:36 -070080 PCI_DMA_FROMDEVICE);
81 tp->rx_buffers[entry].mapping = mapping;
82
83 skb->dev = dev; /* Mark as being used by this device. */
84 tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
85 refilled++;
86 }
87 tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
88 }
89 if(tp->chip_id == LC82C168) {
90 if(((ioread32(tp->base_addr + CSR5)>>17)&0x07) == 4) {
91 /* Rx stopped due to out of buffers,
92 * restart it
93 */
94 iowrite32(0x01, tp->base_addr + CSR2);
95 }
96 }
97 return refilled;
98}
99
100#ifdef CONFIG_TULIP_NAPI
101
102void oom_timer(unsigned long data)
103{
104 struct net_device *dev = (struct net_device *)data;
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700105 struct tulip_private *tp = netdev_priv(dev);
Ben Hutchings288379f2009-01-19 16:43:59 -0800106 napi_schedule(&tp->napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107}
108
/*
 * NAPI poll callback: drain completed frames from the Rx ring (at most
 * @budget), refill the ring, optionally toggle the chip's hardware
 * interrupt mitigation (CSR11), and — once all Rx work is done — call
 * napi_complete() and unmask interrupts via CSR7.  If the ring cannot
 * be refilled, polling stops and oom_timer is armed instead (Rx
 * interrupts stay disabled).  Returns the number of frames processed.
 */
int tulip_poll(struct napi_struct *napi, int budget)
{
	struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
	struct net_device *dev = tp->dev;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
	int received = 0;
#endif

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

/* that one buffer is needed for mit activation; or might be a
   bug in the ring buffer code; check later -- JHS*/

	if (budget >= RX_RING_SIZE) budget--;
#endif

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
		       entry, tp->rx_ring[entry].status);

	do {
		/* All-ones from CSR5 means the device vanished (e.g. hot
		 * unplug); nothing more to do. */
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			printk(KERN_DEBUG " In tulip_poll(), hardware disappeared\n");
			break;
		}
		/* Acknowledge current RX interrupt sources. */
		iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);


		/* If we own the next entry, it is a new packet. Send it up. */
		while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
			s32 status = le32_to_cpu(tp->rx_ring[entry].status);
			short pkt_len;

			/* Ring fully consumed relative to refill pointer. */
			if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
				break;

			if (tulip_debug > 5)
				printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
				       dev->name, entry, status);

			if (++work_done >= budget)
				goto not_done;

			/*
			 * Omit the four octet CRC from the length.
			 * (May not be considered valid until we have
			 * checked status for RxLengthOver2047 bits)
			 */
			pkt_len = ((status >> 16) & 0x7ff) - 4;

			/*
			 * Maximum pkt_len is 1518 (1514 + vlan header)
			 * Anything higher than this is always invalid
			 * regardless of RxLengthOver2047 bits
			 */

			if ((status & (RxLengthOver2047 |
				       RxDescCRCError |
				       RxDescCollisionSeen |
				       RxDescRunt |
				       RxDescDescErr |
				       RxWholePkt)) != RxWholePkt ||
			    pkt_len > 1518) {
				if ((status & (RxLengthOver2047 |
					       RxWholePkt)) != RxWholePkt) {
					/* Ignore earlier buffers. */
					if ((status & 0xffff) != 0x7fff) {
						if (tulip_debug > 1)
							dev_warn(&dev->dev,
								 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
								 status);
						tp->stats.rx_length_errors++;
					}
				} else {
					/* There was a fatal error. */
					if (tulip_debug > 2)
						printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
						       dev->name, status);
					tp->stats.rx_errors++; /* end of a packet.*/
					if (pkt_len > 1518 ||
					    (status & RxDescRunt))
						tp->stats.rx_length_errors++;

					if (status & 0x0004) tp->stats.rx_frame_errors++;
					if (status & 0x0002) tp->stats.rx_crc_errors++;
					if (status & 0x0001) tp->stats.rx_fifo_errors++;
				}
			} else {
				struct sk_buff *skb;

				/* Check if the packet is long enough to accept without copying
				   to a minimally-sized skbuff. */
				if (pkt_len < tulip_rx_copybreak &&
				    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
					skb_reserve(skb, 2);	/* 16 byte align the IP header */
					pci_dma_sync_single_for_cpu(tp->pdev,
								    tp->rx_buffers[entry].mapping,
								    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
					skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
							 pkt_len);
					skb_put(skb, pkt_len);
#else
					memcpy(skb_put(skb, pkt_len),
					       tp->rx_buffers[entry].skb->data,
					       pkt_len);
#endif
					pci_dma_sync_single_for_device(tp->pdev,
								       tp->rx_buffers[entry].mapping,
								       pkt_len, PCI_DMA_FROMDEVICE);
				} else {	/* Pass up the skb already on the Rx ring. */
					char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
							     pkt_len);

#ifndef final_version
					/* Sanity check: descriptor and shadow copy
					 * must reference the same DMA address. */
					if (tp->rx_buffers[entry].mapping !=
					    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
						dev_err(&dev->dev,
							"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
							le32_to_cpu(tp->rx_ring[entry].buffer1),
							(unsigned long long)tp->rx_buffers[entry].mapping,
							skb->head, temp);
					}
#endif

					pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
							 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

					tp->rx_buffers[entry].skb = NULL;
					tp->rx_buffers[entry].mapping = 0;
				}
				skb->protocol = eth_type_trans(skb, dev);

				netif_receive_skb(skb);

				tp->stats.rx_packets++;
				tp->stats.rx_bytes += pkt_len;
			}
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
			received++;
#endif

			entry = (++tp->cur_rx) % RX_RING_SIZE;
			if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
				tulip_refill_rx(dev);

		}

		/* New ack strategy... irq does not ack Rx any longer
		   hopefully this helps */

		/* Really bad things can happen here... If new packet arrives
		 * and an irq arrives (tx or just due to occasionally unset
		 * mask), it will be acked by irq handler, but new thread
		 * is not scheduled. It is major hole in design.
		 * No idea how to fix this if "playing with fire" will fail
		 * tomorrow (night 011029). If it will not fail, we won
		 * finally: amount of IO did not increase at all. */
	} while ((ioread32(tp->base_addr + CSR5) & RxIntr));

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* We use this simplistic scheme for IM. It's proven by
	   real life installations. We can have IM enabled
	   continuesly but this would cause unnecessary latency.
	   Unfortunely we can't use all the NET_RX_* feedback here.
	   This would turn on IM for devices that is not contributing
	   to backlog congestion with unnecessary latency.

	   We monitor the device RX-ring and have:

	   HW Interrupt Mitigation either ON or OFF.

	   ON:  More then 1 pkt received (per intr.) OR we are dropping
	   OFF: Only 1 pkt received

	   Note. We only use min and max (0, 15) settings from mit_table */


	if( tp->flags &  HAS_INTR_MITIGATION) {
		if( received > 1 ) {
			if( ! tp->mit_on ) {
				tp->mit_on = 1;
				iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
			}
		}
		else {
			if( tp->mit_on ) {
				tp->mit_on = 0;
				iowrite32(0, tp->base_addr + CSR11);
			}
		}
	}

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

	tulip_refill_rx(dev);

	/* If RX ring is not full we are out of memory. */
	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	/* Remove us from polling list and enable RX intr. */

	napi_complete(napi);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);

	/* The last op happens after poll completion. Which means the following:
	 * 1. it can race with disabling irqs in irq handler
	 * 2. it can race with dise/enabling irqs in other poll threads
	 * 3. if an irq raised after beginning loop, it will be immediately
	 *    triggered here.
	 *
	 * Summarizing: the logic results in some redundant irqs both
	 * due to races in masking and due to too late acking of already
	 * processed irqs. But it must not result in losing events.
	 */

	return work_done;

 not_done:
	if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
	    tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		tulip_refill_rx(dev);

	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	return work_done;

 oom:	/* Executed with RX ints disabled */

	/* Start timer, stop polling, but do not enable rx interrupts. */
	mod_timer(&tp->oom_timer, jiffies+1);

	/* Think: timer_pending() was an explicit signature of bug.
	 * Timer can be pending now but fired and completed
	 * before we did napi_complete(). See? We would lose it. */

	/* remove ourselves from the polling list */
	napi_complete(napi);

	return work_done;
}
356
357#else /* CONFIG_TULIP_NAPI */
358
/*
 * Non-NAPI receive path (called from tulip_interrupt()): pull completed
 * frames off the Rx ring, bounded by the number of unfilled slots
 * (rx_work_limit), and hand them to the stack via netif_rx().
 * Returns the number of frames processed.
 */
static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
		       entry, tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);
		short pkt_len;

		if (tulip_debug > 5)
			printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
			       dev->name, entry, status);
		if (--rx_work_limit < 0)
			break;

		/*
		  Omit the four octet CRC from the length.
		  (May not be considered valid until we have
		  checked status for RxLengthOver2047 bits)
		*/
		pkt_len = ((status >> 16) & 0x7ff) - 4;
		/*
		  Maximum pkt_len is 1518 (1514 + vlan header)
		  Anything higher than this is always invalid
		  regardless of RxLengthOver2047 bits
		*/

		if ((status & (RxLengthOver2047 |
			       RxDescCRCError |
			       RxDescCollisionSeen |
			       RxDescRunt |
			       RxDescDescErr |
			       RxWholePkt)) != RxWholePkt ||
		    pkt_len > 1518) {
			if ((status & (RxLengthOver2047 |
			     RxWholePkt)) != RxWholePkt) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						dev_warn(&dev->dev,
							 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
							 status);
					tp->stats.rx_length_errors++;
				}
			} else {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
					       dev->name, status);
				tp->stats.rx_errors++; /* end of a packet.*/
				if (pkt_len > 1518 ||
				    (status & RxDescRunt))
					tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			struct sk_buff *skb;

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(tp->pdev,
							    tp->rx_buffers[entry].mapping,
							    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
						 pkt_len);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->data,
				       pkt_len);
#endif
				pci_dma_sync_single_for_device(tp->pdev,
							       tp->rx_buffers[entry].mapping,
							       pkt_len, PCI_DMA_FROMDEVICE);
			} else {	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				/* Sanity check: descriptor and shadow copy
				 * must reference the same DMA address. */
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					dev_err(&dev->dev,
						"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %Lx %p / %p\n",
						le32_to_cpu(tp->rx_ring[entry].buffer1),
						(long long)tp->rx_buffers[entry].mapping,
						skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
478#endif /* CONFIG_TULIP_NAPI */
479
/* On hppa only: detect a PHY (link) interrupt via CSR12, acknowledge it,
 * and re-check the duplex setting under the driver lock.  Returns 1 if a
 * PHY event was handled, 0 otherwise (always 0 on other architectures). */
static inline unsigned int phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
	struct tulip_private *tp = netdev_priv(dev);
	int gpio = ioread32(tp->base_addr + CSR12) & 0xff;

	if (gpio == tp->csr12_shadow)
		return 0;

	/* ack interrupt */
	iowrite32(gpio | 0x02, tp->base_addr + CSR12);
	tp->csr12_shadow = gpio;

	/* do link change stuff */
	spin_lock(&tp->lock);
	tulip_check_duplex(dev);
	spin_unlock(&tp->lock);

	/* clear irq ack bit */
	iowrite32(gpio & ~0x02, tp->base_addr + CSR12);

	return 1;
#else
	return 0;
#endif
}
503
/* The interrupt handler does all of the Rx thread work (or, with NAPI,
   schedules the poll routine and masks Rx interrupts) and cleans up
   after the Tx thread.  Also logs/recovers abnormal-interrupt sources
   (Tx FIFO underflow, Rx stall, PCI system error, link changes). */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;		/* count of "other" (abnormal/timer) events */
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
	int rxd = 0;		/* set once NAPI poll has been scheduled */
#else
	int entry;
#endif
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;

	/* Let's see whether the interrupt really is for us */
	csr5 = ioread32(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		handled = phy_interrupt (dev);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;

	do {

#ifdef CONFIG_TULIP_NAPI

		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask RX intrs and add the device to poll list. */
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
			napi_schedule(&tp->napi);

			if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
				break;
		}

		/* Acknowledge the interrupt sources we handle here ASAP
		   the poll function does Rx and RxNoBuf acking */

		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);


		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

#endif /*  CONFIG_TULIP_NAPI */

		if (tulip_debug > 4)
			printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x\n",
			       dev->name, csr5, ioread32(ioaddr + CSR5));


		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			/* Reclaim completed Tx descriptors. */
			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;			/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
							 tp->tx_buffers[entry].mapping,
							 sizeof(tp->setup_frame),
							 PCI_DMA_TODEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was an major error, log it. */
#ifndef final_version
					if (tulip_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
						       dev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				dev_err(&dev->dev,
					"Out-of-sync dirty pointer, %d vs. %d\n",
					dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					dev_warn(&dev->dev,
						 "The transmitter stopped.  CSR5 is %x, CSR6 %x, new CSR6 %x\n",
						 csr5, ioread32(ioaddr + CSR6),
						 tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}

		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber) tp->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;  /* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				iowrite32(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
					iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {		/* Missed a Rx frame. */
				tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				tp->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SystemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error.  The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				dev_err(&dev->dev,
					"(%lu) System Error occurred (%d)\n",
					tp->nir, error);
			}
			/* Clear all error sources, included undocumented ones! */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {

			if (tulip_debug > 2)
				dev_err(&dev->dev,
					"Re-enabling interrupts, %08x\n",
					csr5);
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
		/* Too much work in one pass: mask interrupts and back off,
		 * re-enabling via mitigation hardware or a software timer. */
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
					 csr5, tp->nir, tx, rx, oi);

			/* Acknowledge all interrupt sources. */
			iowrite32(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
				/* Josip Loncaric at ICASE did extensive experimentation
				   to develop a good interrupt mitigation setting.*/
				iowrite32(0x8b240000, ioaddr + CSR11);
			} else if (tp->chip_id == LC82C168) {
				/* the LC82C168 doesn't have a hw timer.*/
				iowrite32(0x00, ioaddr + CSR7);
				mod_timer(&tp->timer, RUN_AT(HZ/50));
			} else {
				/* Mask all interrupting sources, set timer to
				   re-enable. */
				iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
				iowrite32(0x0012, ioaddr + CSR11);
			}
			break;
		}

		work_count--;
		if (work_count == 0)
			break;

		csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
		if (rxd)
			csr5 &= ~RxPollInt;
	} while ((csr5 & (TxNoBuf |
			  TxDied |
			  TxIntr |
			  TimerInt |
			  /* Abnormal intr. */
			  RxDied |
			  TxFIFOUnderflow |
			  TxJabber |
			  TPLnkFail |
			  SystemError )) != 0);
#else
	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

	tulip_refill_rx(dev);

	/* check if the card is in suspend mode */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
		if (tulip_debug > 1)
			dev_warn(&dev->dev,
				 "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
				 tp->nir, tp->cur_rx, tp->ttimer, rx);
		if (tp->chip_id == LC82C168) {
			iowrite32(0x00, ioaddr + CSR7);
			mod_timer(&tp->timer, RUN_AT(HZ/50));
		} else {
			if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
				if (tulip_debug > 1)
					dev_warn(&dev->dev,
						 "in rx suspend mode: (%lu) set timer\n",
						 tp->nir);
				iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
					ioaddr + CSR7);
				iowrite32(TimerInt, ioaddr + CSR5);
				iowrite32(12, ioaddr + CSR11);
				tp->ttimer = 1;
			}
		}
	}
#endif /* CONFIG_TULIP_NAPI */

	/* Fold the chip's missed-frame counter (CSR8) into rx_dropped. */
	if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
		tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}

	if (tulip_debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#04x\n",
		       dev->name, ioread32(ioaddr + CSR5));

	return IRQ_HANDLED;
}