blob: c6bad987d63e744c452ed9d8b999aa0affe441d0 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 drivers/net/tulip/interrupt.c
3
Linus Torvalds1da177e2005-04-16 15:20:36 -07004 Copyright 2000,2001 The Linux Kernel Team
5 Written/copyright 1994-2001 by Donald Becker.
6
7 This software may be used and distributed according to the terms
8 of the GNU General Public License, incorporated herein by reference.
9
10 Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
Grant Grundler78a65512008-06-05 00:38:55 -060011 for more information on this driver.
12 Please submit bugs to http://bugzilla.kernel.org/ .
Linus Torvalds1da177e2005-04-16 15:20:36 -070013
14*/
15
16#include <linux/pci.h>
17#include "tulip.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#include <linux/etherdevice.h>
19
/* Driver-wide tunables; defined here, initialized elsewhere in the driver
   (presumably from module parameters -- not visible in this file). */
int tulip_rx_copybreak;			/* Rx packets shorter than this are copied into a fresh skb */
unsigned int tulip_max_interrupt_work;	/* cap on loop iterations inside tulip_interrupt() */
22
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

/* Values programmed into CSR11 to control hardware interrupt mitigation.
   Only the first (disabled) and last (maximum) entries are actually used;
   see the MIT_TABLE comment above and the usage in tulip_poll(). */
static unsigned int mit_table[MIT_SIZE+1] =
{
	/* CSR11 21143 hardware Mitigation Control Interrupt register layout.
	   We use only RX mitigation here; other techniques cover
	   TX interrupt mitigation.

	   31    Cycle Size (timer control)
	   30:27 TX timer in 16 * Cycle size
	   26:24 TX No pkts before Int.
	   23:20 RX timer in Cycle size
	   19:17 RX No pkts before Int.
	   16    Continuous Mode (CM)
	*/

	0x0,             /* IM disabled */
	0x80150000,      /* RX time = 1, RX pkts = 2, CM = 1 */
	0x80150000,
	0x80270000,
	0x80370000,
	0x80490000,
	0x80590000,
	0x80690000,
	0x807B0000,
	0x808B0000,
	0x809D0000,
	0x80AD0000,
	0x80BD0000,
	0x80CF0000,
	0x80DF0000,
//	0x80FF0000      /* RX time = 16, RX pkts = 7, CM = 1 */
	0x80F10000      /* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif
60
61
62int tulip_refill_rx(struct net_device *dev)
63{
64 struct tulip_private *tp = netdev_priv(dev);
65 int entry;
66 int refilled = 0;
67
68 /* Refill the Rx ring buffers. */
69 for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
70 entry = tp->dirty_rx % RX_RING_SIZE;
71 if (tp->rx_buffers[entry].skb == NULL) {
72 struct sk_buff *skb;
73 dma_addr_t mapping;
74
75 skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
76 if (skb == NULL)
77 break;
78
David S. Miller689be432005-06-28 15:25:31 -070079 mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
Linus Torvalds1da177e2005-04-16 15:20:36 -070080 PCI_DMA_FROMDEVICE);
81 tp->rx_buffers[entry].mapping = mapping;
82
83 skb->dev = dev; /* Mark as being used by this device. */
84 tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
85 refilled++;
86 }
87 tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
88 }
89 if(tp->chip_id == LC82C168) {
90 if(((ioread32(tp->base_addr + CSR5)>>17)&0x07) == 4) {
91 /* Rx stopped due to out of buffers,
92 * restart it
93 */
94 iowrite32(0x01, tp->base_addr + CSR2);
95 }
96 }
97 return refilled;
98}
99
100#ifdef CONFIG_TULIP_NAPI
101
102void oom_timer(unsigned long data)
103{
104 struct net_device *dev = (struct net_device *)data;
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700105 struct tulip_private *tp = netdev_priv(dev);
106 netif_rx_schedule(dev, &tp->napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107}
108
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700109int tulip_poll(struct napi_struct *napi, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700110{
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700111 struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
112 struct net_device *dev = tp->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113 int entry = tp->cur_rx % RX_RING_SIZE;
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700114 int work_done = 0;
115#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116 int received = 0;
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700117#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118
Linus Torvalds1da177e2005-04-16 15:20:36 -0700119#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
120
121/* that one buffer is needed for mit activation; or might be a
122 bug in the ring buffer code; check later -- JHS*/
123
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700124 if (budget >=RX_RING_SIZE) budget--;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700125#endif
126
127 if (tulip_debug > 4)
128 printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
129 tp->rx_ring[entry].status);
130
131 do {
132 if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
133 printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
134 break;
135 }
136 /* Acknowledge current RX interrupt sources. */
137 iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400138
139
Linus Torvalds1da177e2005-04-16 15:20:36 -0700140 /* If we own the next entry, it is a new packet. Send it up. */
141 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
142 s32 status = le32_to_cpu(tp->rx_ring[entry].status);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400143
Linus Torvalds1da177e2005-04-16 15:20:36 -0700144 if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
145 break;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400146
Linus Torvalds1da177e2005-04-16 15:20:36 -0700147 if (tulip_debug > 5)
148 printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
149 dev->name, entry, status);
Stephen Hemmingerc6a1b622008-01-07 00:23:04 -0800150
151 if (++work_done >= budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700152 goto not_done;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400153
Linus Torvalds1da177e2005-04-16 15:20:36 -0700154 if ((status & 0x38008300) != 0x0300) {
155 if ((status & 0x38000300) != 0x0300) {
156 /* Ingore earlier buffers. */
157 if ((status & 0xffff) != 0x7fff) {
158 if (tulip_debug > 1)
159 printk(KERN_WARNING "%s: Oversized Ethernet frame "
160 "spanned multiple buffers, status %8.8x!\n",
161 dev->name, status);
162 tp->stats.rx_length_errors++;
163 }
164 } else if (status & RxDescFatalErr) {
165 /* There was a fatal error. */
166 if (tulip_debug > 2)
167 printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
168 dev->name, status);
169 tp->stats.rx_errors++; /* end of a packet.*/
170 if (status & 0x0890) tp->stats.rx_length_errors++;
171 if (status & 0x0004) tp->stats.rx_frame_errors++;
172 if (status & 0x0002) tp->stats.rx_crc_errors++;
173 if (status & 0x0001) tp->stats.rx_fifo_errors++;
174 }
175 } else {
176 /* Omit the four octet CRC from the length. */
177 short pkt_len = ((status >> 16) & 0x7ff) - 4;
178 struct sk_buff *skb;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400179
Linus Torvalds1da177e2005-04-16 15:20:36 -0700180#ifndef final_version
181 if (pkt_len > 1518) {
182 printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
183 dev->name, pkt_len, pkt_len);
184 pkt_len = 1518;
185 tp->stats.rx_length_errors++;
186 }
187#endif
188 /* Check if the packet is long enough to accept without copying
189 to a minimally-sized skbuff. */
190 if (pkt_len < tulip_rx_copybreak
191 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192 skb_reserve(skb, 2); /* 16 byte align the IP header */
193 pci_dma_sync_single_for_cpu(tp->pdev,
194 tp->rx_buffers[entry].mapping,
195 pkt_len, PCI_DMA_FROMDEVICE);
196#if ! defined(__alpha__)
David S. Miller8c7b7fa2007-07-10 22:08:12 -0700197 skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
198 pkt_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700199 skb_put(skb, pkt_len);
200#else
201 memcpy(skb_put(skb, pkt_len),
David S. Miller689be432005-06-28 15:25:31 -0700202 tp->rx_buffers[entry].skb->data,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203 pkt_len);
204#endif
205 pci_dma_sync_single_for_device(tp->pdev,
206 tp->rx_buffers[entry].mapping,
207 pkt_len, PCI_DMA_FROMDEVICE);
208 } else { /* Pass up the skb already on the Rx ring. */
209 char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
210 pkt_len);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400211
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212#ifndef final_version
213 if (tp->rx_buffers[entry].mapping !=
214 le32_to_cpu(tp->rx_ring[entry].buffer1)) {
215 printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
216 "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
217 dev->name,
218 le32_to_cpu(tp->rx_ring[entry].buffer1),
219 (unsigned long long)tp->rx_buffers[entry].mapping,
220 skb->head, temp);
221 }
222#endif
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400223
Linus Torvalds1da177e2005-04-16 15:20:36 -0700224 pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
225 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400226
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227 tp->rx_buffers[entry].skb = NULL;
228 tp->rx_buffers[entry].mapping = 0;
229 }
230 skb->protocol = eth_type_trans(skb, dev);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400231
Linus Torvalds1da177e2005-04-16 15:20:36 -0700232 netif_receive_skb(skb);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400233
Linus Torvalds1da177e2005-04-16 15:20:36 -0700234 dev->last_rx = jiffies;
235 tp->stats.rx_packets++;
236 tp->stats.rx_bytes += pkt_len;
237 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700238#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
239 received++;
240#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700241
242 entry = (++tp->cur_rx) % RX_RING_SIZE;
243 if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
244 tulip_refill_rx(dev);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400245
Linus Torvalds1da177e2005-04-16 15:20:36 -0700246 }
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400247
Linus Torvalds1da177e2005-04-16 15:20:36 -0700248 /* New ack strategy... irq does not ack Rx any longer
249 hopefully this helps */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400250
Linus Torvalds1da177e2005-04-16 15:20:36 -0700251 /* Really bad things can happen here... If new packet arrives
252 * and an irq arrives (tx or just due to occasionally unset
253 * mask), it will be acked by irq handler, but new thread
254 * is not scheduled. It is major hole in design.
255 * No idea how to fix this if "playing with fire" will fail
256 * tomorrow (night 011029). If it will not fail, we won
257 * finally: amount of IO did not increase at all. */
258 } while ((ioread32(tp->base_addr + CSR5) & RxIntr));
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400259
Linus Torvalds1da177e2005-04-16 15:20:36 -0700260 #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400261
Linus Torvalds1da177e2005-04-16 15:20:36 -0700262 /* We use this simplistic scheme for IM. It's proven by
263 real life installations. We can have IM enabled
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400264 continuesly but this would cause unnecessary latency.
265 Unfortunely we can't use all the NET_RX_* feedback here.
266 This would turn on IM for devices that is not contributing
267 to backlog congestion with unnecessary latency.
268
Michael Opdenacker59c51592007-05-09 08:57:56 +0200269 We monitor the device RX-ring and have:
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400270
Linus Torvalds1da177e2005-04-16 15:20:36 -0700271 HW Interrupt Mitigation either ON or OFF.
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400272
273 ON: More then 1 pkt received (per intr.) OR we are dropping
Linus Torvalds1da177e2005-04-16 15:20:36 -0700274 OFF: Only 1 pkt received
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400275
Linus Torvalds1da177e2005-04-16 15:20:36 -0700276 Note. We only use min and max (0, 15) settings from mit_table */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400277
278
Linus Torvalds1da177e2005-04-16 15:20:36 -0700279 if( tp->flags & HAS_INTR_MITIGATION) {
280 if( received > 1 ) {
281 if( ! tp->mit_on ) {
282 tp->mit_on = 1;
283 iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
284 }
285 }
286 else {
287 if( tp->mit_on ) {
288 tp->mit_on = 0;
289 iowrite32(0, tp->base_addr + CSR11);
290 }
291 }
292 }
293
294#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400295
Linus Torvalds1da177e2005-04-16 15:20:36 -0700296 tulip_refill_rx(dev);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400297
Linus Torvalds1da177e2005-04-16 15:20:36 -0700298 /* If RX ring is not full we are out of memory. */
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700299 if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
300 goto oom;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400301
Linus Torvalds1da177e2005-04-16 15:20:36 -0700302 /* Remove us from polling list and enable RX intr. */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400303
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700304 netif_rx_complete(dev, napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700305 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400306
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307 /* The last op happens after poll completion. Which means the following:
308 * 1. it can race with disabling irqs in irq handler
309 * 2. it can race with dise/enabling irqs in other poll threads
310 * 3. if an irq raised after beginning loop, it will be immediately
311 * triggered here.
312 *
313 * Summarizing: the logic results in some redundant irqs both
314 * due to races in masking and due to too late acking of already
315 * processed irqs. But it must not result in losing events.
316 */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400317
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700318 return work_done;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400319
Linus Torvalds1da177e2005-04-16 15:20:36 -0700320 not_done:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700321 if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
322 tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
323 tulip_refill_rx(dev);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400324
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700325 if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
326 goto oom;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400327
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700328 return work_done;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400329
Linus Torvalds1da177e2005-04-16 15:20:36 -0700330 oom: /* Executed with RX ints disabled */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400331
Linus Torvalds1da177e2005-04-16 15:20:36 -0700332 /* Start timer, stop polling, but do not enable rx interrupts. */
333 mod_timer(&tp->oom_timer, jiffies+1);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400334
Linus Torvalds1da177e2005-04-16 15:20:36 -0700335 /* Think: timer_pending() was an explicit signature of bug.
336 * Timer can be pending now but fired and completed
337 * before we did netif_rx_complete(). See? We would lose it. */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400338
Linus Torvalds1da177e2005-04-16 15:20:36 -0700339 /* remove ourselves from the polling list */
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700340 netif_rx_complete(dev, napi);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400341
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700342 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700343}
344
345#else /* CONFIG_TULIP_NAPI */
346
/*
 * Non-NAPI receive path: drain completed Rx descriptors and hand the
 * packets to the stack with netif_rx().  Called from tulip_interrupt()
 * with interrupts handled there; returns the number of packets received.
 * Ring refill is done separately by the caller (tulip_refill_rx()).
 */
static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	/* Never process more entries than are currently outstanding. */
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
		       tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);

		if (tulip_debug > 5)
			printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
			       dev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						printk(KERN_WARNING "%s: Oversized Ethernet frame "
						       "spanned multiple buffers, status %8.8x!\n",
						       dev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
					       dev->name, status);
				tp->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct sk_buff *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
				       dev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak
			    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(tp->pdev,
							    tp->rx_buffers[entry].mapping,
							    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
						 pkt_len);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->data,
				       pkt_len);
#endif
				pci_dma_sync_single_for_device(tp->pdev,
							       tp->rx_buffers[entry].mapping,
							       pkt_len, PCI_DMA_FROMDEVICE);
			} else {	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
					       dev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       (long long)tp->rx_buffers[entry].mapping,
					       skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				/* Slot is now empty; tulip_refill_rx() re-arms it. */
				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			dev->last_rx = jiffies;
			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
456#endif /* CONFIG_TULIP_NAPI */
457
/*
 * Handle a PHY-sourced interrupt (hppa platforms only; a no-op elsewhere).
 * Detects a CSR12 link-status change, acknowledges it, and re-checks the
 * duplex setting.  Returns 1 if a PHY event was handled, 0 otherwise.
 */
static inline unsigned int phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
	struct tulip_private *tp = netdev_priv(dev);
	int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

	/* Shadow copy lets us spot a change without a dedicated irq status bit. */
	if (csr12 != tp->csr12_shadow) {
		/* ack interrupt */
		iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
		tp->csr12_shadow = csr12;
		/* do link change stuff */
		spin_lock(&tp->lock);
		tulip_check_duplex(dev);
		spin_unlock(&tp->lock);
		/* clear irq ack bit */
		iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

		return 1;
	}
#endif

	return 0;
}
481
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread.
   With CONFIG_TULIP_NAPI, Rx work is deferred to tulip_poll(); here we
   only mask Rx interrupts and schedule NAPI.  Without NAPI, Rx is drained
   inline via tulip_rx().  Tx completion, error recovery, and interrupt
   mitigation are handled here in both configurations. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;			/* count of "other" (error/timer) events */
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
	int rxd = 0;			/* nonzero once NAPI has been scheduled */
#else
	int entry;
#endif
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;

	/* Let's see whether the interrupt really is for us */
	csr5 = ioread32(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		handled = phy_interrupt (dev);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;

	do {

#ifdef CONFIG_TULIP_NAPI

		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask RX intrs and add the device to poll list. */
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
			netif_rx_schedule(dev, &tp->napi);

			/* Nothing but Rx-type events pending: done here. */
			if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
				break;
		}

		/* Acknowledge the interrupt sources we handle here ASAP
		   the poll function does Rx and RxNoBuf acking */

		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);


		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

#endif /* CONFIG_TULIP_NAPI */

		if (tulip_debug > 4)
			printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
			       dev->name, csr5, ioread32(ioaddr + CSR5));


		/* Tx completion / cleanup, under the private lock. */
		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;	/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
							 tp->tx_buffers[entry].mapping,
							 sizeof(tp->setup_frame),
							 PCI_DMA_TODEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was an major error, log it. */
#ifndef final_version
					if (tulip_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
						       dev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
				       dev->name, dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			/* Wake the queue once at least two slots are free. */
			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					printk(KERN_WARNING "%s: The transmitter stopped."
					       " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
					       dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}

		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			/* All-ones readback: hardware has disappeared. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber) tp->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;	/* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				iowrite32(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
					iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {	/* Missed a Rx frame. */
				tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				tp->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SystemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error. The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
				       dev->name, tp->nir, error);
			}
			/* Clear all error sources, included undocumented ones! */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {

			if (tulip_debug > 2)
				printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
				       dev->name, csr5);
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
		/* Guard against interrupt storms: mask and defer. */
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				printk(KERN_WARNING "%s: Too much work during an interrupt, "
				       "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);

			/* Acknowledge all interrupt sources. */
			iowrite32(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
				/* Josip Loncaric at ICASE did extensive experimentation
				   to develop a good interrupt mitigation setting.*/
				iowrite32(0x8b240000, ioaddr + CSR11);
			} else if (tp->chip_id == LC82C168) {
				/* the LC82C168 doesn't have a hw timer.*/
				iowrite32(0x00, ioaddr + CSR7);
				mod_timer(&tp->timer, RUN_AT(HZ/50));
			} else {
				/* Mask all interrupting sources, set timer to
				   re-enable. */
				iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
				iowrite32(0x0012, ioaddr + CSR11);
			}
			break;
		}

		work_count--;
		if (work_count == 0)
			break;

		csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
		/* Rx events are NAPI's business now; drop them from the loop test. */
		if (rxd)
			csr5 &= ~RxPollInt;
	} while ((csr5 & (TxNoBuf |
			  TxDied |
			  TxIntr |
			  TimerInt |
			  /* Abnormal intr. */
			  RxDied |
			  TxFIFOUnderflow |
			  TxJabber |
			  TPLnkFail |
			  SystemError )) != 0);
#else
	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

	tulip_refill_rx(dev);

	/* check if the card is in suspend mode */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
		if (tulip_debug > 1)
			printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
		if (tp->chip_id == LC82C168) {
			iowrite32(0x00, ioaddr + CSR7);
			mod_timer(&tp->timer, RUN_AT(HZ/50));
		} else {
			if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
				if (tulip_debug > 1)
					printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir);
				iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
					ioaddr + CSR7);
				iowrite32(TimerInt, ioaddr + CSR5);
				iowrite32(12, ioaddr + CSR11);
				tp->ttimer = 1;
			}
		}
	}
#endif /* CONFIG_TULIP_NAPI */

	/* Fold the hardware missed-frame counter into the stats. */
	if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
		tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}

	if (tulip_debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
		       dev->name, ioread32(ioaddr + CSR5));

	return IRQ_HANDLED;
}