blob: 2e8e8ee893c7fee9e309570877e1a5b675dcaf7c [file] [log] [blame]
/*
	drivers/net/tulip/interrupt.c

	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
	for more information on this driver.

	Please submit bugs to http://bugzilla.kernel.org/ .

*/
15
16#include <linux/pci.h>
17#include "tulip.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#include <linux/etherdevice.h>
19
/* Tunables shared across the driver: packets shorter than
 * tulip_rx_copybreak are copied into a freshly allocated skb instead of
 * handing up the ring buffer (see tulip_poll()/tulip_rx() below);
 * tulip_max_interrupt_work bounds the tulip_interrupt() service loop.
 * NOTE(review): both are defined here but initialized elsewhere,
 * presumably from module parameters in tulip_core.c — confirm. */
int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;
22
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

/* CSR11 values for hardware interrupt mitigation.  Only the first
 * (disabled) and last (maximum) entries are actually used by
 * tulip_poll(); the intermediate rows are kept for reference. */
static unsigned int mit_table[MIT_SIZE+1] =
{
        /* CRS11 21143 hardware Mitigation Control Interrupt.
           We use only RX mitigation; other techniques handle
           TX intr. mitigation.

           Bit layout of the register value:
           31    Cycle Size (timer control)
           30:27 TX timer in 16 * Cycle size
           26:24 TX No pkts before Int.
           23:20 RX timer in Cycle size
           19:17 RX No pkts before Int.
           16    Continues Mode (CM)
        */

        0x0,             /* IM disabled */
        0x80150000,      /* RX time = 1, RX pkts = 2, CM = 1 */
        0x80150000,
        0x80270000,
        0x80370000,
        0x80490000,
        0x80590000,
        0x80690000,
        0x807B0000,
        0x808B0000,
        0x809D0000,
        0x80AD0000,
        0x80BD0000,
        0x80CF0000,
        0x80DF0000,
//       0x80FF0000      /* RX time = 16, RX pkts = 7, CM = 1 */
        0x80F10000      /* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif
60
61
/*
 * tulip_refill_rx - replenish empty Rx ring slots with fresh buffers.
 * @dev: network device whose ring is refilled.
 *
 * Walks from dirty_rx up to cur_rx, allocating an skb and a DMA mapping
 * for every slot whose skb was consumed, then hands each slot back to
 * the hardware by setting DescOwned.  Stops early (leaving dirty_rx
 * behind cur_rx) if skb allocation fails, which callers detect by
 * finding a NULL skb at dirty_rx.
 *
 * Returns the number of buffers newly allocated.
 */
int tulip_refill_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	int refilled = 0;

	/* Refill the Rx ring buffers. */
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_buffers[entry].skb == NULL) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
			if (skb == NULL)
				break;

			/* NOTE(review): mapping result is not checked for a
			 * DMA mapping error — presumably acceptable for this
			 * driver's era; confirm against the DMA API rules. */
			mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
						 PCI_DMA_FROMDEVICE);
			tp->rx_buffers[entry].mapping = mapping;

			skb->dev = dev;			/* Mark as being used by this device. */
			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
			refilled++;
		}
		/* Give the descriptor (back) to the chip — done for every
		 * slot in the range, not only freshly allocated ones. */
		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
	}
	if(tp->chip_id == LC82C168) {
		/* CSR5 bits 19:17 == 4 on this chip means the Rx engine
		 * stalled; poke CSR2 to restart reception. */
		if(((ioread32(tp->base_addr + CSR5)>>17)&0x07) == 4) {
			/* Rx stopped due to out of buffers,
			 * restart it
			 */
			iowrite32(0x01, tp->base_addr + CSR2);
		}
	}
	return refilled;
}
99
100#ifdef CONFIG_TULIP_NAPI
101
102void oom_timer(unsigned long data)
103{
104 struct net_device *dev = (struct net_device *)data;
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700105 struct tulip_private *tp = netdev_priv(dev);
Ben Hutchings288379f2009-01-19 16:43:59 -0800106 napi_schedule(&tp->napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107}
108
/*
 * tulip_poll - NAPI poll routine: receive packets and manage Rx state.
 * @napi:   NAPI context embedded in struct tulip_private.
 * @budget: maximum number of packets we may process this call.
 *
 * Drains the Rx ring while the chip keeps asserting RxIntr in CSR5,
 * delivering packets via netif_receive_skb().  On completion it refills
 * the ring, optionally toggles hardware interrupt mitigation (CSR11),
 * re-enables Rx interrupts and calls napi_complete().  If buffer
 * allocation fails it instead arms oom_timer and leaves Rx interrupts
 * disabled.  Returns the number of packets processed (work_done).
 */
int tulip_poll(struct napi_struct *napi, int budget)
{
	struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
	struct net_device *dev = tp->dev;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
	int received = 0;
#endif

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

/* that one buffer is needed for mit activation; or might be a
   bug in the ring buffer code; check later -- JHS*/

	if (budget >=RX_RING_SIZE) budget--;
#endif

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
		       tp->rx_ring[entry].status);

	do {
		/* All-ones from CSR5 means the device (e.g. a hot-unplugged
		 * CardBus card) is gone — bail out. */
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
			break;
		}
		/* Acknowledge current RX interrupt sources. */
		iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);


		/* If we own the next entry, it is a new packet. Send it up. */
		while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
			s32 status = le32_to_cpu(tp->rx_ring[entry].status);
			short pkt_len;

			/* Ring fully consumed and not yet refilled. */
			if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
				break;

			if (tulip_debug > 5)
				printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
				       dev->name, entry, status);

			if (++work_done >= budget)
				goto not_done;

			/*
			 * Omit the four octet CRC from the length.
			 * (May not be considered valid until we have
			 * checked status for RxLengthOver2047 bits)
			 */
			pkt_len = ((status >> 16) & 0x7ff) - 4;

			/*
			 * Maximum pkt_len is 1518 (1514 + vlan header)
			 * Anything higher than this is always invalid
			 * regardless of RxLengthOver2047 bits
			 */

			if ((status & (RxLengthOver2047 |
				       RxDescCRCError |
				       RxDescCollisionSeen |
				       RxDescRunt |
				       RxDescDescErr |
				       RxWholePkt)) != RxWholePkt ||
			    pkt_len > 1518) {
				if ((status & (RxLengthOver2047 |
					       RxWholePkt)) != RxWholePkt) {
					/* Ignore earlier buffers. */
					if ((status & 0xffff) != 0x7fff) {
						if (tulip_debug > 1)
							printk(KERN_WARNING "%s: Oversized Ethernet frame "
							       "spanned multiple buffers, status %8.8x!\n",
							       dev->name, status);
						tp->stats.rx_length_errors++;
					}
				} else {
					/* There was a fatal error. */
					if (tulip_debug > 2)
						printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						       dev->name, status);
					tp->stats.rx_errors++; /* end of a packet.*/
					if (pkt_len > 1518 ||
					    (status & RxDescRunt))
						tp->stats.rx_length_errors++;

					if (status & 0x0004) tp->stats.rx_frame_errors++;
					if (status & 0x0002) tp->stats.rx_crc_errors++;
					if (status & 0x0001) tp->stats.rx_fifo_errors++;
				}
			} else {
				struct sk_buff *skb;

				/* Check if the packet is long enough to accept without copying
				   to a minimally-sized skbuff. */
				if (pkt_len < tulip_rx_copybreak &&
				    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
					skb_reserve(skb, 2);	/* 16 byte align the IP header */
					pci_dma_sync_single_for_cpu(tp->pdev,
								    tp->rx_buffers[entry].mapping,
								    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
					skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
								pkt_len);
					skb_put(skb, pkt_len);
#else
					memcpy(skb_put(skb, pkt_len),
					       tp->rx_buffers[entry].skb->data,
					       pkt_len);
#endif
					pci_dma_sync_single_for_device(tp->pdev,
								       tp->rx_buffers[entry].mapping,
								       pkt_len, PCI_DMA_FROMDEVICE);
				} else {	/* Pass up the skb already on the Rx ring. */
					char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
							     pkt_len);

#ifndef final_version
					/* Sanity check: descriptor and our bookkeeping
					 * must agree on the DMA address. */
					if (tp->rx_buffers[entry].mapping !=
					    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
						printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
						       "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
						       dev->name,
						       le32_to_cpu(tp->rx_ring[entry].buffer1),
						       (unsigned long long)tp->rx_buffers[entry].mapping,
						       skb->head, temp);
					}
#endif

					pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
							 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

					/* Slot now empty; tulip_refill_rx() will restock it. */
					tp->rx_buffers[entry].skb = NULL;
					tp->rx_buffers[entry].mapping = 0;
				}
				skb->protocol = eth_type_trans(skb, dev);

				netif_receive_skb(skb);

				tp->stats.rx_packets++;
				tp->stats.rx_bytes += pkt_len;
			}
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
			received++;
#endif

			entry = (++tp->cur_rx) % RX_RING_SIZE;
			/* Refill opportunistically once a quarter of the
			 * ring has been consumed. */
			if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
				tulip_refill_rx(dev);

		}

		/* New ack strategy... irq does not ack Rx any longer
		   hopefully this helps */

		/* Really bad things can happen here... If new packet arrives
		 * and an irq arrives (tx or just due to occasionally unset
		 * mask), it will be acked by irq handler, but new thread
		 * is not scheduled. It is major hole in design.
		 * No idea how to fix this if "playing with fire" will fail
		 * tomorrow (night 011029). If it will not fail, we won
		 * finally: amount of IO did not increase at all. */
	} while ((ioread32(tp->base_addr + CSR5) & RxIntr));

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* We use this simplistic scheme for IM. It's proven by
	   real life installations. We can have IM enabled
	   continuesly but this would cause unnecessary latency.
	   Unfortunely we can't use all the NET_RX_* feedback here.
	   This would turn on IM for devices that is not contributing
	   to backlog congestion with unnecessary latency.

	   We monitor the device RX-ring and have:

	   HW Interrupt Mitigation either ON or OFF.

	   ON:  More then 1 pkt received (per intr.) OR we are dropping
	   OFF: Only 1 pkt received

	   Note. We only use min and max (0, 15) settings from mit_table */


	if( tp->flags & HAS_INTR_MITIGATION) {
		if( received > 1 ) {
			if( ! tp->mit_on ) {
				tp->mit_on = 1;
				iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
			}
		}
		else {
			if( tp->mit_on ) {
				tp->mit_on = 0;
				iowrite32(0, tp->base_addr + CSR11);
			}
		}
	}

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

	tulip_refill_rx(dev);

	/* If RX ring is not full we are out of memory. */
	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	/* Remove us from polling list and enable RX intr. */

	napi_complete(napi);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);

	/* The last op happens after poll completion. Which means the following:
	 * 1. it can race with disabling irqs in irq handler
	 * 2. it can race with dise/enabling irqs in other poll threads
	 * 3. if an irq raised after beginning loop, it will be immediately
	 *    triggered here.
	 *
	 * Summarizing: the logic results in some redundant irqs both
	 * due to races in masking and due to too late acking of already
	 * processed irqs. But it must not result in losing events.
	 */

	return work_done;

 not_done:
	/* Budget exhausted: top up the ring if it is half-drained or
	 * already starved, but stay on the poll list. */
	if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
	    tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		tulip_refill_rx(dev);

	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	return work_done;

 oom:	/* Executed with RX ints disabled */

	/* Start timer, stop polling, but do not enable rx interrupts. */
	mod_timer(&tp->oom_timer, jiffies+1);

	/* Think: timer_pending() was an explicit signature of bug.
	 * Timer can be pending now but fired and completed
	 * before we did napi_complete(). See? We would lose it. */

	/* remove ourselves from the polling list */
	napi_complete(napi);

	return work_done;
}
357
358#else /* CONFIG_TULIP_NAPI */
359
/*
 * tulip_rx - non-NAPI receive path, called from tulip_interrupt().
 * @dev: network device being serviced.
 *
 * Processes every completed descriptor the CPU owns, bounded by the
 * number of free ring slots (rx_work_limit), delivering good packets
 * via netif_rx() and accounting errors.  Mirrors the inner loop of
 * tulip_poll(); does NOT refill the ring — the caller does that.
 * Returns the number of packets accepted.
 */
static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
		       tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);
		short pkt_len;

		if (tulip_debug > 5)
			printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
			       dev->name, entry, status);
		if (--rx_work_limit < 0)
			break;

		/*
		  Omit the four octet CRC from the length.
		  (May not be considered valid until we have
		  checked status for RxLengthOver2047 bits)
		*/
		pkt_len = ((status >> 16) & 0x7ff) - 4;
		/*
		  Maximum pkt_len is 1518 (1514 + vlan header)
		  Anything higher than this is always invalid
		  regardless of RxLengthOver2047 bits
		*/

		if ((status & (RxLengthOver2047 |
			       RxDescCRCError |
			       RxDescCollisionSeen |
			       RxDescRunt |
			       RxDescDescErr |
			       RxWholePkt)) != RxWholePkt ||
		    pkt_len > 1518) {
			if ((status & (RxLengthOver2047 |
				       RxWholePkt)) != RxWholePkt) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						printk(KERN_WARNING "%s: Oversized Ethernet frame "
						       "spanned multiple buffers, status %8.8x!\n",
						       dev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
					       dev->name, status);
				tp->stats.rx_errors++; /* end of a packet.*/
				if (pkt_len > 1518 ||
				    (status & RxDescRunt))
					tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			struct sk_buff *skb;

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(tp->pdev,
							    tp->rx_buffers[entry].mapping,
							    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
							pkt_len);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->data,
				       pkt_len);
#endif
				pci_dma_sync_single_for_device(tp->pdev,
							       tp->rx_buffers[entry].mapping,
							       pkt_len, PCI_DMA_FROMDEVICE);
			} else {	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				/* Sanity check: descriptor and our bookkeeping
				 * must agree on the DMA address. */
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
					       dev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       (long long)tp->rx_buffers[entry].mapping,
					       skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				/* Slot now empty; caller's tulip_refill_rx()
				 * will restock it. */
				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
480#endif /* CONFIG_TULIP_NAPI */
481
/*
 * phy_interrupt - service a PHY link-change interrupt (HPPA only).
 * @dev: network device whose PHY raised the interrupt.
 *
 * On __hppa__ builds, compares the low byte of CSR12 against the cached
 * shadow copy; on change it acks the interrupt, updates the shadow,
 * re-checks duplex under tp->lock, and clears the ack bit.
 * Returns 1 if a PHY interrupt was handled, 0 otherwise (always 0 on
 * non-HPPA builds, where the body compiles away).
 */
static inline unsigned int phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
	struct tulip_private *tp = netdev_priv(dev);
	int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

	if (csr12 != tp->csr12_shadow) {
		/* ack interrupt */
		iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
		tp->csr12_shadow = csr12;
		/* do link change stuff */
		spin_lock(&tp->lock);
		tulip_check_duplex(dev);
		spin_unlock(&tp->lock);
		/* clear irq ack bit */
		iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

		return 1;
	}
#endif

	return 0;
}
505
506/* The interrupt handler does all of the Rx thread work and cleans up
507 after the Tx thread. */
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
/*
 * tulip_interrupt - top-level IRQ handler.
 * @irq:          interrupt number (unused except by the IRQ core).
 * @dev_instance: the struct net_device registered with request_irq.
 *
 * Reads CSR5 to classify the interrupt.  Under CONFIG_TULIP_NAPI the
 * Rx work is deferred to tulip_poll() (Rx interrupts are masked here);
 * otherwise Rx is handled inline via tulip_rx().  Tx completion,
 * abnormal-error accounting and work-limit throttling are handled in
 * the loop below in both configurations.
 * Returns IRQ_HANDLED, or IRQ_RETVAL(handled) when CSR5 shows nothing
 * for us.
 */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;	/* count of "other" (abnormal/timer) events handled */
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
	int rxd = 0;	/* set once NAPI has been scheduled for Rx */
#else
	int entry;
#endif
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;

	/* Let's see whether the interrupt really is for us */
	csr5 = ioread32(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		handled = phy_interrupt (dev);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;

	do {

#ifdef CONFIG_TULIP_NAPI

		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask RX intrs and add the device to poll list. */
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
			napi_schedule(&tp->napi);

			/* Nothing left but Rx-type sources: let the poll
			 * routine take it from here. */
			if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
                               break;
		}

	       /* Acknowledge the interrupt sources we handle here ASAP
		  the poll function does Rx and RxNoBuf acking */

		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);


		if (csr5 & (RxIntr | RxNoBuf)) {
				rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

#endif /*  CONFIG_TULIP_NAPI */

		if (tulip_debug > 4)
			printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
			       dev->name, csr5, ioread32(ioaddr + CSR5));


		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			/* Reclaim completed Tx descriptors. */
			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
				 dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;			/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
							 tp->tx_buffers[entry].mapping,
							 sizeof(tp->setup_frame),
							 PCI_DMA_TODEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was an major error, log it. */
#ifndef final_version
					if (tulip_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
							   dev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
					   dev->name, dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					printk(KERN_WARNING "%s: The transmitter stopped."
						   "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
						   dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}

		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			/* All-ones means the hardware disappeared. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber) tp->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;  /* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				iowrite32(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				/* NOTE(review): 0xAC/0xB0 are presumably the
				 * Comet MAC-filter registers being rewritten
				 * after an Rx stall — confirm against the
				 * Comet datasheet. */
				if (tp->flags & COMET_MAC_ADDR) {
					iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
					iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {		/* Missed a Rx frame. */
				tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				tp->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SystemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error.  The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
					dev->name, tp->nir, error);
			}
			/* Clear all error sources, included undocumented ones! */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {

			if (tulip_debug > 2)
				printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
					   dev->name, csr5);
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
		/* Interrupt storm protection: if one IRQ produced more work
		 * than a full ring's worth, mask sources and let a timer
		 * re-enable them later. */
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				printk(KERN_WARNING "%s: Too much work during an interrupt, "
					   "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);

		       /* Acknowledge all interrupt sources. */
			iowrite32(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
		     /* Josip Loncaric at ICASE did extensive experimentation
			to develop a good interrupt mitigation setting.*/
				iowrite32(0x8b240000, ioaddr + CSR11);
			} else if (tp->chip_id == LC82C168) {
				/* the LC82C168 doesn't have a hw timer.*/
				iowrite32(0x00, ioaddr + CSR7);
				mod_timer(&tp->timer, RUN_AT(HZ/50));
			} else {
			  /* Mask all interrupting sources, set timer to
				re-enable. */
				iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
				iowrite32(0x0012, ioaddr + CSR11);
			}
			break;
		}

		work_count--;
		if (work_count == 0)
			break;

		csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
		if (rxd)
			csr5 &= ~RxPollInt;
	} while ((csr5 & (TxNoBuf |
			  TxDied |
			  TxIntr |
			  TimerInt |
			  /* Abnormal intr. */
			  RxDied |
			  TxFIFOUnderflow |
			  TxJabber |
			  TPLnkFail |
			  SystemError )) != 0);
#else
	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

	tulip_refill_rx(dev);

	/* check if the card is in suspend mode */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
		if (tulip_debug > 1)
			printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
		if (tp->chip_id == LC82C168) {
			iowrite32(0x00, ioaddr + CSR7);
			mod_timer(&tp->timer, RUN_AT(HZ/50));
		} else {
			if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
				if (tulip_debug > 1)
					printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir);
				iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
					ioaddr + CSR7);
				iowrite32(TimerInt, ioaddr + CSR5);
				iowrite32(12, ioaddr + CSR11);
				tp->ttimer = 1;
			}
		}
	}
#endif /* CONFIG_TULIP_NAPI */

	/* CSR8 accumulates frames missed by the chip; fold into stats. */
	if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
		tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}

	if (tulip_debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
			   dev->name, ioread32(ioaddr + CSR5));

	return IRQ_HANDLED;
}