blob: 6284afd14bbb5fe42789b0a6f9531172ef9adf12 [file] [log] [blame]
/*
	drivers/net/tulip/interrupt.c

	Maintained by Valerie Henson <val_henson@linux.intel.com>
	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
	for more information on this driver, or visit the project
	Web page at http://sourceforge.net/projects/tulip/

*/
16
17#include <linux/pci.h>
18#include "tulip.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070019#include <linux/etherdevice.h>
20
/* Tunables exported to tulip_core.c and set via module parameters:
 * rx_copybreak: packets shorter than this are copied into a fresh,
 * minimally-sized skb instead of consuming a full ring buffer;
 * max_interrupt_work: cap on loop iterations in tulip_interrupt(). */
int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;
23
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

/*
 * CSR11 values for the 21143 hardware interrupt mitigation control.
 * Only RX mitigation is used here; TX interrupts are mitigated by
 * other techniques.
 *
 * Bit layout of each entry:
 *   31    Cycle Size (timer control)
 *   30:27 TX timer in 16 * Cycle size
 *   26:24 TX No pkts before Int.
 *   23:20 RX timer in Cycle size
 *   19:17 RX No pkts before Int.
 *   16    Continues Mode (CM)
 *
 * Note: only the 0 (disabled) and MIT_TABLE (max) entries are
 * actually used by tulip_poll().
 */
static unsigned int mit_table[MIT_SIZE+1] =
{
	0x0,        /* IM disabled */
	0x80150000, /* RX time = 1, RX pkts = 2, CM = 1 */
	0x80150000,
	0x80270000,
	0x80370000,
	0x80490000,
	0x80590000,
	0x80690000,
	0x807B0000,
	0x808B0000,
	0x809D0000,
	0x80AD0000,
	0x80BD0000,
	0x80CF0000,
	0x80DF0000,
//	0x80FF0000  /* RX time = 16, RX pkts = 7, CM = 1 */
	0x80F10000  /* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif
61
62
63int tulip_refill_rx(struct net_device *dev)
64{
65 struct tulip_private *tp = netdev_priv(dev);
66 int entry;
67 int refilled = 0;
68
69 /* Refill the Rx ring buffers. */
70 for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
71 entry = tp->dirty_rx % RX_RING_SIZE;
72 if (tp->rx_buffers[entry].skb == NULL) {
73 struct sk_buff *skb;
74 dma_addr_t mapping;
75
76 skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
77 if (skb == NULL)
78 break;
79
David S. Miller689be432005-06-28 15:25:31 -070080 mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
Linus Torvalds1da177e2005-04-16 15:20:36 -070081 PCI_DMA_FROMDEVICE);
82 tp->rx_buffers[entry].mapping = mapping;
83
84 skb->dev = dev; /* Mark as being used by this device. */
85 tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
86 refilled++;
87 }
88 tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
89 }
90 if(tp->chip_id == LC82C168) {
91 if(((ioread32(tp->base_addr + CSR5)>>17)&0x07) == 4) {
92 /* Rx stopped due to out of buffers,
93 * restart it
94 */
95 iowrite32(0x01, tp->base_addr + CSR2);
96 }
97 }
98 return refilled;
99}
100
101#ifdef CONFIG_TULIP_NAPI
102
103void oom_timer(unsigned long data)
104{
105 struct net_device *dev = (struct net_device *)data;
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700106 struct tulip_private *tp = netdev_priv(dev);
107 netif_rx_schedule(dev, &tp->napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108}
109
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700110int tulip_poll(struct napi_struct *napi, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700111{
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700112 struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
113 struct net_device *dev = tp->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700114 int entry = tp->cur_rx % RX_RING_SIZE;
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700115 int work_done = 0;
116#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
Linus Torvalds1da177e2005-04-16 15:20:36 -0700117 int received = 0;
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700118#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700119
Linus Torvalds1da177e2005-04-16 15:20:36 -0700120#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
121
122/* that one buffer is needed for mit activation; or might be a
123 bug in the ring buffer code; check later -- JHS*/
124
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700125 if (budget >=RX_RING_SIZE) budget--;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126#endif
127
128 if (tulip_debug > 4)
129 printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
130 tp->rx_ring[entry].status);
131
132 do {
133 if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
134 printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
135 break;
136 }
137 /* Acknowledge current RX interrupt sources. */
138 iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400139
140
Linus Torvalds1da177e2005-04-16 15:20:36 -0700141 /* If we own the next entry, it is a new packet. Send it up. */
142 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
143 s32 status = le32_to_cpu(tp->rx_ring[entry].status);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400144
Linus Torvalds1da177e2005-04-16 15:20:36 -0700145 if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
146 break;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400147
Linus Torvalds1da177e2005-04-16 15:20:36 -0700148 if (tulip_debug > 5)
149 printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
150 dev->name, entry, status);
Stephen Hemmingerc6a1b622008-01-07 00:23:04 -0800151
152 if (++work_done >= budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700153 goto not_done;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400154
Linus Torvalds1da177e2005-04-16 15:20:36 -0700155 if ((status & 0x38008300) != 0x0300) {
156 if ((status & 0x38000300) != 0x0300) {
157 /* Ingore earlier buffers. */
158 if ((status & 0xffff) != 0x7fff) {
159 if (tulip_debug > 1)
160 printk(KERN_WARNING "%s: Oversized Ethernet frame "
161 "spanned multiple buffers, status %8.8x!\n",
162 dev->name, status);
163 tp->stats.rx_length_errors++;
164 }
165 } else if (status & RxDescFatalErr) {
166 /* There was a fatal error. */
167 if (tulip_debug > 2)
168 printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
169 dev->name, status);
170 tp->stats.rx_errors++; /* end of a packet.*/
171 if (status & 0x0890) tp->stats.rx_length_errors++;
172 if (status & 0x0004) tp->stats.rx_frame_errors++;
173 if (status & 0x0002) tp->stats.rx_crc_errors++;
174 if (status & 0x0001) tp->stats.rx_fifo_errors++;
175 }
176 } else {
177 /* Omit the four octet CRC from the length. */
178 short pkt_len = ((status >> 16) & 0x7ff) - 4;
179 struct sk_buff *skb;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400180
Linus Torvalds1da177e2005-04-16 15:20:36 -0700181#ifndef final_version
182 if (pkt_len > 1518) {
183 printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
184 dev->name, pkt_len, pkt_len);
185 pkt_len = 1518;
186 tp->stats.rx_length_errors++;
187 }
188#endif
189 /* Check if the packet is long enough to accept without copying
190 to a minimally-sized skbuff. */
191 if (pkt_len < tulip_rx_copybreak
192 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700193 skb_reserve(skb, 2); /* 16 byte align the IP header */
194 pci_dma_sync_single_for_cpu(tp->pdev,
195 tp->rx_buffers[entry].mapping,
196 pkt_len, PCI_DMA_FROMDEVICE);
197#if ! defined(__alpha__)
David S. Miller8c7b7fa2007-07-10 22:08:12 -0700198 skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
199 pkt_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700200 skb_put(skb, pkt_len);
201#else
202 memcpy(skb_put(skb, pkt_len),
David S. Miller689be432005-06-28 15:25:31 -0700203 tp->rx_buffers[entry].skb->data,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204 pkt_len);
205#endif
206 pci_dma_sync_single_for_device(tp->pdev,
207 tp->rx_buffers[entry].mapping,
208 pkt_len, PCI_DMA_FROMDEVICE);
209 } else { /* Pass up the skb already on the Rx ring. */
210 char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
211 pkt_len);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400212
Linus Torvalds1da177e2005-04-16 15:20:36 -0700213#ifndef final_version
214 if (tp->rx_buffers[entry].mapping !=
215 le32_to_cpu(tp->rx_ring[entry].buffer1)) {
216 printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
217 "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
218 dev->name,
219 le32_to_cpu(tp->rx_ring[entry].buffer1),
220 (unsigned long long)tp->rx_buffers[entry].mapping,
221 skb->head, temp);
222 }
223#endif
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400224
Linus Torvalds1da177e2005-04-16 15:20:36 -0700225 pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
226 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400227
Linus Torvalds1da177e2005-04-16 15:20:36 -0700228 tp->rx_buffers[entry].skb = NULL;
229 tp->rx_buffers[entry].mapping = 0;
230 }
231 skb->protocol = eth_type_trans(skb, dev);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400232
Linus Torvalds1da177e2005-04-16 15:20:36 -0700233 netif_receive_skb(skb);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400234
Linus Torvalds1da177e2005-04-16 15:20:36 -0700235 dev->last_rx = jiffies;
236 tp->stats.rx_packets++;
237 tp->stats.rx_bytes += pkt_len;
238 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700239#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
240 received++;
241#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700242
243 entry = (++tp->cur_rx) % RX_RING_SIZE;
244 if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
245 tulip_refill_rx(dev);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400246
Linus Torvalds1da177e2005-04-16 15:20:36 -0700247 }
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400248
Linus Torvalds1da177e2005-04-16 15:20:36 -0700249 /* New ack strategy... irq does not ack Rx any longer
250 hopefully this helps */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400251
Linus Torvalds1da177e2005-04-16 15:20:36 -0700252 /* Really bad things can happen here... If new packet arrives
253 * and an irq arrives (tx or just due to occasionally unset
254 * mask), it will be acked by irq handler, but new thread
255 * is not scheduled. It is major hole in design.
256 * No idea how to fix this if "playing with fire" will fail
257 * tomorrow (night 011029). If it will not fail, we won
258 * finally: amount of IO did not increase at all. */
259 } while ((ioread32(tp->base_addr + CSR5) & RxIntr));
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400260
Linus Torvalds1da177e2005-04-16 15:20:36 -0700261 #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400262
Linus Torvalds1da177e2005-04-16 15:20:36 -0700263 /* We use this simplistic scheme for IM. It's proven by
264 real life installations. We can have IM enabled
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400265 continuesly but this would cause unnecessary latency.
266 Unfortunely we can't use all the NET_RX_* feedback here.
267 This would turn on IM for devices that is not contributing
268 to backlog congestion with unnecessary latency.
269
Michael Opdenacker59c51592007-05-09 08:57:56 +0200270 We monitor the device RX-ring and have:
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400271
Linus Torvalds1da177e2005-04-16 15:20:36 -0700272 HW Interrupt Mitigation either ON or OFF.
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400273
274 ON: More then 1 pkt received (per intr.) OR we are dropping
Linus Torvalds1da177e2005-04-16 15:20:36 -0700275 OFF: Only 1 pkt received
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400276
Linus Torvalds1da177e2005-04-16 15:20:36 -0700277 Note. We only use min and max (0, 15) settings from mit_table */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400278
279
Linus Torvalds1da177e2005-04-16 15:20:36 -0700280 if( tp->flags & HAS_INTR_MITIGATION) {
281 if( received > 1 ) {
282 if( ! tp->mit_on ) {
283 tp->mit_on = 1;
284 iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
285 }
286 }
287 else {
288 if( tp->mit_on ) {
289 tp->mit_on = 0;
290 iowrite32(0, tp->base_addr + CSR11);
291 }
292 }
293 }
294
295#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400296
Linus Torvalds1da177e2005-04-16 15:20:36 -0700297 tulip_refill_rx(dev);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400298
Linus Torvalds1da177e2005-04-16 15:20:36 -0700299 /* If RX ring is not full we are out of memory. */
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700300 if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
301 goto oom;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400302
Linus Torvalds1da177e2005-04-16 15:20:36 -0700303 /* Remove us from polling list and enable RX intr. */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400304
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700305 netif_rx_complete(dev, napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700306 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400307
Linus Torvalds1da177e2005-04-16 15:20:36 -0700308 /* The last op happens after poll completion. Which means the following:
309 * 1. it can race with disabling irqs in irq handler
310 * 2. it can race with dise/enabling irqs in other poll threads
311 * 3. if an irq raised after beginning loop, it will be immediately
312 * triggered here.
313 *
314 * Summarizing: the logic results in some redundant irqs both
315 * due to races in masking and due to too late acking of already
316 * processed irqs. But it must not result in losing events.
317 */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400318
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700319 return work_done;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400320
Linus Torvalds1da177e2005-04-16 15:20:36 -0700321 not_done:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700322 if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
323 tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
324 tulip_refill_rx(dev);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400325
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700326 if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
327 goto oom;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400328
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700329 return work_done;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400330
Linus Torvalds1da177e2005-04-16 15:20:36 -0700331 oom: /* Executed with RX ints disabled */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400332
Linus Torvalds1da177e2005-04-16 15:20:36 -0700333 /* Start timer, stop polling, but do not enable rx interrupts. */
334 mod_timer(&tp->oom_timer, jiffies+1);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400335
Linus Torvalds1da177e2005-04-16 15:20:36 -0700336 /* Think: timer_pending() was an explicit signature of bug.
337 * Timer can be pending now but fired and completed
338 * before we did netif_rx_complete(). See? We would lose it. */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400339
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340 /* remove ourselves from the polling list */
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700341 netif_rx_complete(dev, napi);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400342
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700343 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700344}
345
346#else /* CONFIG_TULIP_NAPI */
347
348static int tulip_rx(struct net_device *dev)
349{
350 struct tulip_private *tp = netdev_priv(dev);
351 int entry = tp->cur_rx % RX_RING_SIZE;
352 int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
353 int received = 0;
354
355 if (tulip_debug > 4)
356 printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
357 tp->rx_ring[entry].status);
358 /* If we own the next entry, it is a new packet. Send it up. */
359 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
360 s32 status = le32_to_cpu(tp->rx_ring[entry].status);
361
362 if (tulip_debug > 5)
363 printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
364 dev->name, entry, status);
365 if (--rx_work_limit < 0)
366 break;
367 if ((status & 0x38008300) != 0x0300) {
368 if ((status & 0x38000300) != 0x0300) {
369 /* Ingore earlier buffers. */
370 if ((status & 0xffff) != 0x7fff) {
371 if (tulip_debug > 1)
372 printk(KERN_WARNING "%s: Oversized Ethernet frame "
373 "spanned multiple buffers, status %8.8x!\n",
374 dev->name, status);
375 tp->stats.rx_length_errors++;
376 }
377 } else if (status & RxDescFatalErr) {
378 /* There was a fatal error. */
379 if (tulip_debug > 2)
380 printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
381 dev->name, status);
382 tp->stats.rx_errors++; /* end of a packet.*/
383 if (status & 0x0890) tp->stats.rx_length_errors++;
384 if (status & 0x0004) tp->stats.rx_frame_errors++;
385 if (status & 0x0002) tp->stats.rx_crc_errors++;
386 if (status & 0x0001) tp->stats.rx_fifo_errors++;
387 }
388 } else {
389 /* Omit the four octet CRC from the length. */
390 short pkt_len = ((status >> 16) & 0x7ff) - 4;
391 struct sk_buff *skb;
392
393#ifndef final_version
394 if (pkt_len > 1518) {
395 printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
396 dev->name, pkt_len, pkt_len);
397 pkt_len = 1518;
398 tp->stats.rx_length_errors++;
399 }
400#endif
401
402 /* Check if the packet is long enough to accept without copying
403 to a minimally-sized skbuff. */
404 if (pkt_len < tulip_rx_copybreak
405 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700406 skb_reserve(skb, 2); /* 16 byte align the IP header */
407 pci_dma_sync_single_for_cpu(tp->pdev,
408 tp->rx_buffers[entry].mapping,
409 pkt_len, PCI_DMA_FROMDEVICE);
410#if ! defined(__alpha__)
David S. Miller8c7b7fa2007-07-10 22:08:12 -0700411 skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
412 pkt_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700413 skb_put(skb, pkt_len);
414#else
415 memcpy(skb_put(skb, pkt_len),
David S. Miller689be432005-06-28 15:25:31 -0700416 tp->rx_buffers[entry].skb->data,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700417 pkt_len);
418#endif
419 pci_dma_sync_single_for_device(tp->pdev,
420 tp->rx_buffers[entry].mapping,
421 pkt_len, PCI_DMA_FROMDEVICE);
422 } else { /* Pass up the skb already on the Rx ring. */
423 char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
424 pkt_len);
425
426#ifndef final_version
427 if (tp->rx_buffers[entry].mapping !=
428 le32_to_cpu(tp->rx_ring[entry].buffer1)) {
429 printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
430 "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
431 dev->name,
432 le32_to_cpu(tp->rx_ring[entry].buffer1),
433 (long long)tp->rx_buffers[entry].mapping,
434 skb->head, temp);
435 }
436#endif
437
438 pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
439 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
440
441 tp->rx_buffers[entry].skb = NULL;
442 tp->rx_buffers[entry].mapping = 0;
443 }
444 skb->protocol = eth_type_trans(skb, dev);
445
446 netif_rx(skb);
447
448 dev->last_rx = jiffies;
449 tp->stats.rx_packets++;
450 tp->stats.rx_bytes += pkt_len;
451 }
452 received++;
453 entry = (++tp->cur_rx) % RX_RING_SIZE;
454 }
455 return received;
456}
457#endif /* CONFIG_TULIP_NAPI */
458
/*
 * Handle a PHY-sourced interrupt (HPPA/LASI boards only).  Returns 1
 * when a PHY interrupt was pending and handled, 0 otherwise; on other
 * architectures this compiles to a plain "return 0".
 */
static inline unsigned int phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
	struct tulip_private *tp = netdev_priv(dev);
	int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

	if (csr12 != tp->csr12_shadow) {
		/* ack interrupt */
		iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
		tp->csr12_shadow = csr12;
		/* do link change stuff */
		spin_lock(&tp->lock);
		tulip_check_duplex(dev);
		spin_unlock(&tp->lock);
		/* clear irq ack bit */
		iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

		return 1;
	}
#endif

	return 0;
}
482
483/* The interrupt handler does all of the Rx thread work and cleans up
484 after the Tx thread. */
David Howells7d12e782006-10-05 14:55:46 +0100485irqreturn_t tulip_interrupt(int irq, void *dev_instance)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700486{
487 struct net_device *dev = (struct net_device *)dev_instance;
488 struct tulip_private *tp = netdev_priv(dev);
489 void __iomem *ioaddr = tp->base_addr;
490 int csr5;
491 int missed;
492 int rx = 0;
493 int tx = 0;
494 int oi = 0;
495 int maxrx = RX_RING_SIZE;
496 int maxtx = TX_RING_SIZE;
497 int maxoi = TX_RING_SIZE;
498#ifdef CONFIG_TULIP_NAPI
499 int rxd = 0;
500#else
501 int entry;
502#endif
503 unsigned int work_count = tulip_max_interrupt_work;
504 unsigned int handled = 0;
505
506 /* Let's see whether the interrupt really is for us */
507 csr5 = ioread32(ioaddr + CSR5);
508
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400509 if (tp->flags & HAS_PHY_IRQ)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700510 handled = phy_interrupt (dev);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400511
Linus Torvalds1da177e2005-04-16 15:20:36 -0700512 if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
513 return IRQ_RETVAL(handled);
514
515 tp->nir++;
516
517 do {
518
519#ifdef CONFIG_TULIP_NAPI
520
521 if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
522 rxd++;
523 /* Mask RX intrs and add the device to poll list. */
524 iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700525 netif_rx_schedule(dev, &tp->napi);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400526
Linus Torvalds1da177e2005-04-16 15:20:36 -0700527 if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
528 break;
529 }
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400530
Linus Torvalds1da177e2005-04-16 15:20:36 -0700531 /* Acknowledge the interrupt sources we handle here ASAP
532 the poll function does Rx and RxNoBuf acking */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400533
Linus Torvalds1da177e2005-04-16 15:20:36 -0700534 iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);
535
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400536#else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700537 /* Acknowledge all of the current interrupt sources ASAP. */
538 iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);
539
540
541 if (csr5 & (RxIntr | RxNoBuf)) {
542 rx += tulip_rx(dev);
543 tulip_refill_rx(dev);
544 }
545
546#endif /* CONFIG_TULIP_NAPI */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400547
Linus Torvalds1da177e2005-04-16 15:20:36 -0700548 if (tulip_debug > 4)
549 printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
550 dev->name, csr5, ioread32(ioaddr + CSR5));
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400551
Linus Torvalds1da177e2005-04-16 15:20:36 -0700552
553 if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
554 unsigned int dirty_tx;
555
556 spin_lock(&tp->lock);
557
558 for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
559 dirty_tx++) {
560 int entry = dirty_tx % TX_RING_SIZE;
561 int status = le32_to_cpu(tp->tx_ring[entry].status);
562
563 if (status < 0)
564 break; /* It still has not been Txed */
565
566 /* Check for Rx filter setup frames. */
567 if (tp->tx_buffers[entry].skb == NULL) {
568 /* test because dummy frames not mapped */
569 if (tp->tx_buffers[entry].mapping)
570 pci_unmap_single(tp->pdev,
571 tp->tx_buffers[entry].mapping,
572 sizeof(tp->setup_frame),
573 PCI_DMA_TODEVICE);
574 continue;
575 }
576
577 if (status & 0x8000) {
578 /* There was an major error, log it. */
579#ifndef final_version
580 if (tulip_debug > 1)
581 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
582 dev->name, status);
583#endif
584 tp->stats.tx_errors++;
585 if (status & 0x4104) tp->stats.tx_aborted_errors++;
586 if (status & 0x0C00) tp->stats.tx_carrier_errors++;
587 if (status & 0x0200) tp->stats.tx_window_errors++;
588 if (status & 0x0002) tp->stats.tx_fifo_errors++;
589 if ((status & 0x0080) && tp->full_duplex == 0)
590 tp->stats.tx_heartbeat_errors++;
591 } else {
592 tp->stats.tx_bytes +=
593 tp->tx_buffers[entry].skb->len;
594 tp->stats.collisions += (status >> 3) & 15;
595 tp->stats.tx_packets++;
596 }
597
598 pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
599 tp->tx_buffers[entry].skb->len,
600 PCI_DMA_TODEVICE);
601
602 /* Free the original skb. */
603 dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
604 tp->tx_buffers[entry].skb = NULL;
605 tp->tx_buffers[entry].mapping = 0;
606 tx++;
607 }
608
609#ifndef final_version
610 if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
611 printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
612 dev->name, dirty_tx, tp->cur_tx);
613 dirty_tx += TX_RING_SIZE;
614 }
615#endif
616
617 if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
618 netif_wake_queue(dev);
619
620 tp->dirty_tx = dirty_tx;
621 if (csr5 & TxDied) {
622 if (tulip_debug > 2)
623 printk(KERN_WARNING "%s: The transmitter stopped."
624 " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
625 dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
626 tulip_restart_rxtx(tp);
627 }
628 spin_unlock(&tp->lock);
629 }
630
631 /* Log errors. */
632 if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */
633 if (csr5 == 0xffffffff)
634 break;
635 if (csr5 & TxJabber) tp->stats.tx_errors++;
636 if (csr5 & TxFIFOUnderflow) {
637 if ((tp->csr6 & 0xC000) != 0xC000)
638 tp->csr6 += 0x4000; /* Bump up the Tx threshold */
639 else
640 tp->csr6 |= 0x00200000; /* Store-n-forward. */
641 /* Restart the transmit process. */
642 tulip_restart_rxtx(tp);
643 iowrite32(0, ioaddr + CSR1);
644 }
645 if (csr5 & (RxDied | RxNoBuf)) {
646 if (tp->flags & COMET_MAC_ADDR) {
647 iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
648 iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
649 }
650 }
651 if (csr5 & RxDied) { /* Missed a Rx frame. */
652 tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
653 tp->stats.rx_errors++;
654 tulip_start_rxtx(tp);
655 }
656 /*
657 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
658 * call is ever done under the spinlock
659 */
660 if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
661 if (tp->link_change)
662 (tp->link_change)(dev, csr5);
663 }
Valerie Henson1ddb9862007-03-12 02:31:33 -0700664 if (csr5 & SystemError) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700665 int error = (csr5 >> 23) & 7;
666 /* oops, we hit a PCI error. The code produced corresponds
667 * to the reason:
668 * 0 - parity error
669 * 1 - master abort
670 * 2 - target abort
671 * Note that on parity error, we should do a software reset
672 * of the chip to get it back into a sane state (according
673 * to the 21142/3 docs that is).
674 * -- rmk
675 */
676 printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
677 dev->name, tp->nir, error);
678 }
679 /* Clear all error sources, included undocumented ones! */
680 iowrite32(0x0800f7ba, ioaddr + CSR5);
681 oi++;
682 }
683 if (csr5 & TimerInt) {
684
685 if (tulip_debug > 2)
686 printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
687 dev->name, csr5);
688 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
689 tp->ttimer = 0;
690 oi++;
691 }
692 if (tx > maxtx || rx > maxrx || oi > maxoi) {
693 if (tulip_debug > 1)
694 printk(KERN_WARNING "%s: Too much work during an interrupt, "
695 "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);
696
697 /* Acknowledge all interrupt sources. */
698 iowrite32(0x8001ffff, ioaddr + CSR5);
699 if (tp->flags & HAS_INTR_MITIGATION) {
700 /* Josip Loncaric at ICASE did extensive experimentation
701 to develop a good interrupt mitigation setting.*/
702 iowrite32(0x8b240000, ioaddr + CSR11);
703 } else if (tp->chip_id == LC82C168) {
704 /* the LC82C168 doesn't have a hw timer.*/
705 iowrite32(0x00, ioaddr + CSR7);
706 mod_timer(&tp->timer, RUN_AT(HZ/50));
707 } else {
708 /* Mask all interrupting sources, set timer to
709 re-enable. */
710 iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
711 iowrite32(0x0012, ioaddr + CSR11);
712 }
713 break;
714 }
715
716 work_count--;
717 if (work_count == 0)
718 break;
719
720 csr5 = ioread32(ioaddr + CSR5);
721
722#ifdef CONFIG_TULIP_NAPI
723 if (rxd)
724 csr5 &= ~RxPollInt;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400725 } while ((csr5 & (TxNoBuf |
726 TxDied |
727 TxIntr |
Linus Torvalds1da177e2005-04-16 15:20:36 -0700728 TimerInt |
729 /* Abnormal intr. */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400730 RxDied |
731 TxFIFOUnderflow |
732 TxJabber |
733 TPLnkFail |
Valerie Henson1ddb9862007-03-12 02:31:33 -0700734 SystemError )) != 0);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400735#else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700736 } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
737
738 tulip_refill_rx(dev);
739
740 /* check if the card is in suspend mode */
741 entry = tp->dirty_rx % RX_RING_SIZE;
742 if (tp->rx_buffers[entry].skb == NULL) {
743 if (tulip_debug > 1)
744 printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
745 if (tp->chip_id == LC82C168) {
746 iowrite32(0x00, ioaddr + CSR7);
747 mod_timer(&tp->timer, RUN_AT(HZ/50));
748 } else {
749 if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
750 if (tulip_debug > 1)
751 printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir);
752 iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
753 ioaddr + CSR7);
754 iowrite32(TimerInt, ioaddr + CSR5);
755 iowrite32(12, ioaddr + CSR11);
756 tp->ttimer = 1;
757 }
758 }
759 }
760#endif /* CONFIG_TULIP_NAPI */
761
762 if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
763 tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
764 }
765
766 if (tulip_debug > 4)
767 printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
768 dev->name, ioread32(ioaddr + CSR5));
769
770 return IRQ_HANDLED;
771}