blob: c1ca0765d56d4b3fd163162b599103221411dc47 [file] [log] [blame]
/*
	drivers/net/ethernet/dec/tulip/interrupt.c

	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	Please submit bugs to http://bugzilla.kernel.org/ .
*/
12
13#include <linux/pci.h>
14#include "tulip.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070015#include <linux/etherdevice.h>
16
/*
 * Run-time tunables read by the Rx/interrupt paths below.  They are
 * defined here without initialisers; values are assigned elsewhere in
 * the driver (NOTE(review): presumably from module parameters — verify
 * in tulip_core.c).
 */
int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;
19
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

/*
 * CSR11 (21143) hardware interrupt-mitigation settings.  Only Rx
 * mitigation is programmed through this table; Tx interrupt mitigation
 * uses other techniques.
 *
 * Bit layout of each entry (timer control register):
 *   31     Cycle Size (timer control)
 *   30:27  TX timer in 16 * Cycle size
 *   26:24  TX No pkts before Int.
 *   23:20  RX timer in Cycle size
 *   19:17  RX No pkts before Int.
 *   16     Continues Mode (CM)
 *
 * Note: the driver only ever programs index 0 (off) or MIT_TABLE (max).
 */
static unsigned int mit_table[MIT_SIZE+1] =
{
	0x0,            /* IM disabled */
	0x80150000,     /* RX time = 1, RX pkts = 2, CM = 1 */
	0x80150000,
	0x80270000,
	0x80370000,
	0x80490000,
	0x80590000,
	0x80690000,
	0x807B0000,
	0x808B0000,
	0x809D0000,
	0x80AD0000,
	0x80BD0000,
	0x80CF0000,
	0x80DF0000,
/*	0x80FF0000      RX time = 16, RX pkts = 7, CM = 1 */
	0x80F10000      /* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif
57
58
59int tulip_refill_rx(struct net_device *dev)
60{
61 struct tulip_private *tp = netdev_priv(dev);
62 int entry;
63 int refilled = 0;
64
65 /* Refill the Rx ring buffers. */
66 for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
67 entry = tp->dirty_rx % RX_RING_SIZE;
68 if (tp->rx_buffers[entry].skb == NULL) {
69 struct sk_buff *skb;
70 dma_addr_t mapping;
71
Pradeep A Dalvi21a4e462012-02-05 02:50:10 +000072 skb = tp->rx_buffers[entry].skb =
73 netdev_alloc_skb(dev, PKT_BUF_SZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -070074 if (skb == NULL)
75 break;
76
David S. Miller689be432005-06-28 15:25:31 -070077 mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
Linus Torvalds1da177e2005-04-16 15:20:36 -070078 PCI_DMA_FROMDEVICE);
Neil Hormanc9bfbb32013-06-13 15:31:28 -040079 if (dma_mapping_error(&tp->pdev->dev, mapping)) {
80 dev_kfree_skb(skb);
81 tp->rx_buffers[entry].skb = NULL;
82 break;
83 }
84
Linus Torvalds1da177e2005-04-16 15:20:36 -070085 tp->rx_buffers[entry].mapping = mapping;
86
Linus Torvalds1da177e2005-04-16 15:20:36 -070087 tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
88 refilled++;
89 }
90 tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
91 }
92 if(tp->chip_id == LC82C168) {
93 if(((ioread32(tp->base_addr + CSR5)>>17)&0x07) == 4) {
94 /* Rx stopped due to out of buffers,
95 * restart it
96 */
97 iowrite32(0x01, tp->base_addr + CSR2);
98 }
99 }
100 return refilled;
101}
102
103#ifdef CONFIG_TULIP_NAPI
104
Kees Cooka8c22a22017-10-16 17:29:05 -0700105void oom_timer(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700106{
Kees Cooka8c22a22017-10-16 17:29:05 -0700107 struct tulip_private *tp = from_timer(tp, t, oom_timer);
108
Ben Hutchings288379f2009-01-19 16:43:59 -0800109 napi_schedule(&tp->napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700110}
111
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700112int tulip_poll(struct napi_struct *napi, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113{
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700114 struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
115 struct net_device *dev = tp->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116 int entry = tp->cur_rx % RX_RING_SIZE;
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700117 int work_done = 0;
118#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
Linus Torvalds1da177e2005-04-16 15:20:36 -0700119 int received = 0;
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700120#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700121
Linus Torvalds1da177e2005-04-16 15:20:36 -0700122#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
123
124/* that one buffer is needed for mit activation; or might be a
125 bug in the ring buffer code; check later -- JHS*/
126
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700127 if (budget >=RX_RING_SIZE) budget--;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700128#endif
129
130 if (tulip_debug > 4)
Joe Perches726b65a2011-05-09 09:45:22 +0000131 netdev_dbg(dev, " In tulip_rx(), entry %d %08x\n",
132 entry, tp->rx_ring[entry].status);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700133
134 do {
135 if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
Joe Perches726b65a2011-05-09 09:45:22 +0000136 netdev_dbg(dev, " In tulip_poll(), hardware disappeared\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700137 break;
138 }
139 /* Acknowledge current RX interrupt sources. */
140 iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400141
142
Linus Torvalds1da177e2005-04-16 15:20:36 -0700143 /* If we own the next entry, it is a new packet. Send it up. */
144 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
145 s32 status = le32_to_cpu(tp->rx_ring[entry].status);
Tomasz Lemiech1f8ae0a2009-03-13 15:43:38 -0700146 short pkt_len;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400147
Linus Torvalds1da177e2005-04-16 15:20:36 -0700148 if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
149 break;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400150
Joe Perches726b65a2011-05-09 09:45:22 +0000151 if (tulip_debug > 5)
152 netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
153 entry, status);
Stephen Hemmingerc6a1b622008-01-07 00:23:04 -0800154
155 if (++work_done >= budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156 goto not_done;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400157
Tomasz Lemiech1f8ae0a2009-03-13 15:43:38 -0700158 /*
159 * Omit the four octet CRC from the length.
160 * (May not be considered valid until we have
161 * checked status for RxLengthOver2047 bits)
162 */
163 pkt_len = ((status >> 16) & 0x7ff) - 4;
164
165 /*
166 * Maximum pkt_len is 1518 (1514 + vlan header)
167 * Anything higher than this is always invalid
168 * regardless of RxLengthOver2047 bits
169 */
170
171 if ((status & (RxLengthOver2047 |
172 RxDescCRCError |
173 RxDescCollisionSeen |
174 RxDescRunt |
175 RxDescDescErr |
Joe Perches8e95a202009-12-03 07:58:21 +0000176 RxWholePkt)) != RxWholePkt ||
177 pkt_len > 1518) {
Tomasz Lemiech1f8ae0a2009-03-13 15:43:38 -0700178 if ((status & (RxLengthOver2047 |
179 RxWholePkt)) != RxWholePkt) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700180 /* Ingore earlier buffers. */
181 if ((status & 0xffff) != 0x7fff) {
182 if (tulip_debug > 1)
Joe Perchesabe02af2010-01-28 20:59:22 +0000183 dev_warn(&dev->dev,
184 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
185 status);
Eric Dumazet1a18aba2010-09-03 03:52:43 +0000186 dev->stats.rx_length_errors++;
187 }
Tomasz Lemiech1f8ae0a2009-03-13 15:43:38 -0700188 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700189 /* There was a fatal error. */
Joe Perches726b65a2011-05-09 09:45:22 +0000190 if (tulip_debug > 2)
191 netdev_dbg(dev, "Receive error, Rx status %08x\n",
192 status);
Eric Dumazet1a18aba2010-09-03 03:52:43 +0000193 dev->stats.rx_errors++; /* end of a packet.*/
194 if (pkt_len > 1518 ||
195 (status & RxDescRunt))
196 dev->stats.rx_length_errors++;
Tomasz Lemiech1f8ae0a2009-03-13 15:43:38 -0700197
Eric Dumazet1a18aba2010-09-03 03:52:43 +0000198 if (status & 0x0004)
199 dev->stats.rx_frame_errors++;
200 if (status & 0x0002)
201 dev->stats.rx_crc_errors++;
202 if (status & 0x0001)
203 dev->stats.rx_fifo_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204 }
205 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206 struct sk_buff *skb;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400207
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208 /* Check if the packet is long enough to accept without copying
209 to a minimally-sized skbuff. */
Joe Perches8e95a202009-12-03 07:58:21 +0000210 if (pkt_len < tulip_rx_copybreak &&
Pradeep A Dalvi21a4e462012-02-05 02:50:10 +0000211 (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212 skb_reserve(skb, 2); /* 16 byte align the IP header */
213 pci_dma_sync_single_for_cpu(tp->pdev,
214 tp->rx_buffers[entry].mapping,
215 pkt_len, PCI_DMA_FROMDEVICE);
216#if ! defined(__alpha__)
David S. Miller8c7b7fa2007-07-10 22:08:12 -0700217 skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
218 pkt_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700219 skb_put(skb, pkt_len);
220#else
Johannes Berg59ae1d12017-06-16 14:29:20 +0200221 skb_put_data(skb,
222 tp->rx_buffers[entry].skb->data,
223 pkt_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700224#endif
225 pci_dma_sync_single_for_device(tp->pdev,
226 tp->rx_buffers[entry].mapping,
227 pkt_len, PCI_DMA_FROMDEVICE);
228 } else { /* Pass up the skb already on the Rx ring. */
229 char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
230 pkt_len);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400231
Linus Torvalds1da177e2005-04-16 15:20:36 -0700232#ifndef final_version
233 if (tp->rx_buffers[entry].mapping !=
234 le32_to_cpu(tp->rx_ring[entry].buffer1)) {
Joe Perchesabe02af2010-01-28 20:59:22 +0000235 dev_err(&dev->dev,
236 "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
237 le32_to_cpu(tp->rx_ring[entry].buffer1),
238 (unsigned long long)tp->rx_buffers[entry].mapping,
239 skb->head, temp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700240 }
241#endif
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400242
Linus Torvalds1da177e2005-04-16 15:20:36 -0700243 pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
244 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400245
Linus Torvalds1da177e2005-04-16 15:20:36 -0700246 tp->rx_buffers[entry].skb = NULL;
247 tp->rx_buffers[entry].mapping = 0;
248 }
249 skb->protocol = eth_type_trans(skb, dev);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400250
Linus Torvalds1da177e2005-04-16 15:20:36 -0700251 netif_receive_skb(skb);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400252
Eric Dumazet1a18aba2010-09-03 03:52:43 +0000253 dev->stats.rx_packets++;
254 dev->stats.rx_bytes += pkt_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700255 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700256#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
257 received++;
258#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700259
260 entry = (++tp->cur_rx) % RX_RING_SIZE;
261 if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
262 tulip_refill_rx(dev);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400263
Linus Torvalds1da177e2005-04-16 15:20:36 -0700264 }
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400265
Linus Torvalds1da177e2005-04-16 15:20:36 -0700266 /* New ack strategy... irq does not ack Rx any longer
267 hopefully this helps */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400268
Linus Torvalds1da177e2005-04-16 15:20:36 -0700269 /* Really bad things can happen here... If new packet arrives
270 * and an irq arrives (tx or just due to occasionally unset
271 * mask), it will be acked by irq handler, but new thread
272 * is not scheduled. It is major hole in design.
273 * No idea how to fix this if "playing with fire" will fail
274 * tomorrow (night 011029). If it will not fail, we won
275 * finally: amount of IO did not increase at all. */
276 } while ((ioread32(tp->base_addr + CSR5) & RxIntr));
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400277
Linus Torvalds1da177e2005-04-16 15:20:36 -0700278 #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400279
Linus Torvalds1da177e2005-04-16 15:20:36 -0700280 /* We use this simplistic scheme for IM. It's proven by
281 real life installations. We can have IM enabled
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400282 continuesly but this would cause unnecessary latency.
283 Unfortunely we can't use all the NET_RX_* feedback here.
284 This would turn on IM for devices that is not contributing
285 to backlog congestion with unnecessary latency.
286
Michael Opdenacker59c51592007-05-09 08:57:56 +0200287 We monitor the device RX-ring and have:
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400288
Linus Torvalds1da177e2005-04-16 15:20:36 -0700289 HW Interrupt Mitigation either ON or OFF.
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400290
291 ON: More then 1 pkt received (per intr.) OR we are dropping
Linus Torvalds1da177e2005-04-16 15:20:36 -0700292 OFF: Only 1 pkt received
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400293
Linus Torvalds1da177e2005-04-16 15:20:36 -0700294 Note. We only use min and max (0, 15) settings from mit_table */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400295
296
Linus Torvalds1da177e2005-04-16 15:20:36 -0700297 if( tp->flags & HAS_INTR_MITIGATION) {
298 if( received > 1 ) {
299 if( ! tp->mit_on ) {
300 tp->mit_on = 1;
301 iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
302 }
303 }
304 else {
305 if( tp->mit_on ) {
306 tp->mit_on = 0;
307 iowrite32(0, tp->base_addr + CSR11);
308 }
309 }
310 }
311
312#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400313
Linus Torvalds1da177e2005-04-16 15:20:36 -0700314 tulip_refill_rx(dev);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400315
Linus Torvalds1da177e2005-04-16 15:20:36 -0700316 /* If RX ring is not full we are out of memory. */
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700317 if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
318 goto oom;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400319
Linus Torvalds1da177e2005-04-16 15:20:36 -0700320 /* Remove us from polling list and enable RX intr. */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400321
Eric Dumazet6ad20162017-01-30 08:22:01 -0800322 napi_complete_done(napi, work_done);
323 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400324
Linus Torvalds1da177e2005-04-16 15:20:36 -0700325 /* The last op happens after poll completion. Which means the following:
326 * 1. it can race with disabling irqs in irq handler
327 * 2. it can race with dise/enabling irqs in other poll threads
328 * 3. if an irq raised after beginning loop, it will be immediately
329 * triggered here.
330 *
331 * Summarizing: the logic results in some redundant irqs both
332 * due to races in masking and due to too late acking of already
333 * processed irqs. But it must not result in losing events.
334 */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400335
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700336 return work_done;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400337
Linus Torvalds1da177e2005-04-16 15:20:36 -0700338 not_done:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700339 if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
340 tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
341 tulip_refill_rx(dev);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400342
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700343 if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
344 goto oom;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400345
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700346 return work_done;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400347
Linus Torvalds1da177e2005-04-16 15:20:36 -0700348 oom: /* Executed with RX ints disabled */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400349
Linus Torvalds1da177e2005-04-16 15:20:36 -0700350 /* Start timer, stop polling, but do not enable rx interrupts. */
351 mod_timer(&tp->oom_timer, jiffies+1);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400352
Linus Torvalds1da177e2005-04-16 15:20:36 -0700353 /* Think: timer_pending() was an explicit signature of bug.
354 * Timer can be pending now but fired and completed
Ben Hutchings288379f2009-01-19 16:43:59 -0800355 * before we did napi_complete(). See? We would lose it. */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400356
Linus Torvalds1da177e2005-04-16 15:20:36 -0700357 /* remove ourselves from the polling list */
Eric Dumazet6ad20162017-01-30 08:22:01 -0800358 napi_complete_done(napi, work_done);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400359
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700360 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700361}
362
363#else /* CONFIG_TULIP_NAPI */
364
365static int tulip_rx(struct net_device *dev)
366{
367 struct tulip_private *tp = netdev_priv(dev);
368 int entry = tp->cur_rx % RX_RING_SIZE;
369 int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
370 int received = 0;
371
372 if (tulip_debug > 4)
Joe Perches726b65a2011-05-09 09:45:22 +0000373 netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
374 entry, tp->rx_ring[entry].status);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700375 /* If we own the next entry, it is a new packet. Send it up. */
376 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
377 s32 status = le32_to_cpu(tp->rx_ring[entry].status);
Tomasz Lemiech1f8ae0a2009-03-13 15:43:38 -0700378 short pkt_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700379
380 if (tulip_debug > 5)
Joe Perches726b65a2011-05-09 09:45:22 +0000381 netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
382 entry, status);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700383 if (--rx_work_limit < 0)
384 break;
Tomasz Lemiech1f8ae0a2009-03-13 15:43:38 -0700385
386 /*
387 Omit the four octet CRC from the length.
388 (May not be considered valid until we have
389 checked status for RxLengthOver2047 bits)
390 */
391 pkt_len = ((status >> 16) & 0x7ff) - 4;
392 /*
393 Maximum pkt_len is 1518 (1514 + vlan header)
394 Anything higher than this is always invalid
395 regardless of RxLengthOver2047 bits
396 */
397
398 if ((status & (RxLengthOver2047 |
399 RxDescCRCError |
400 RxDescCollisionSeen |
401 RxDescRunt |
402 RxDescDescErr |
Joe Perches8e95a202009-12-03 07:58:21 +0000403 RxWholePkt)) != RxWholePkt ||
404 pkt_len > 1518) {
Tomasz Lemiech1f8ae0a2009-03-13 15:43:38 -0700405 if ((status & (RxLengthOver2047 |
406 RxWholePkt)) != RxWholePkt) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407 /* Ingore earlier buffers. */
408 if ((status & 0xffff) != 0x7fff) {
409 if (tulip_debug > 1)
Joe Perches726b65a2011-05-09 09:45:22 +0000410 netdev_warn(dev,
411 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
412 status);
Eric Dumazet1a18aba2010-09-03 03:52:43 +0000413 dev->stats.rx_length_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700414 }
Tomasz Lemiech1f8ae0a2009-03-13 15:43:38 -0700415 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700416 /* There was a fatal error. */
417 if (tulip_debug > 2)
Joe Perches726b65a2011-05-09 09:45:22 +0000418 netdev_dbg(dev, "Receive error, Rx status %08x\n",
419 status);
Eric Dumazet1a18aba2010-09-03 03:52:43 +0000420 dev->stats.rx_errors++; /* end of a packet.*/
Tomasz Lemiech1f8ae0a2009-03-13 15:43:38 -0700421 if (pkt_len > 1518 ||
422 (status & RxDescRunt))
Eric Dumazet1a18aba2010-09-03 03:52:43 +0000423 dev->stats.rx_length_errors++;
424 if (status & 0x0004)
425 dev->stats.rx_frame_errors++;
426 if (status & 0x0002)
427 dev->stats.rx_crc_errors++;
428 if (status & 0x0001)
429 dev->stats.rx_fifo_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700430 }
431 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700432 struct sk_buff *skb;
433
Linus Torvalds1da177e2005-04-16 15:20:36 -0700434 /* Check if the packet is long enough to accept without copying
435 to a minimally-sized skbuff. */
Joe Perches8e95a202009-12-03 07:58:21 +0000436 if (pkt_len < tulip_rx_copybreak &&
Pradeep A Dalvi21a4e462012-02-05 02:50:10 +0000437 (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700438 skb_reserve(skb, 2); /* 16 byte align the IP header */
439 pci_dma_sync_single_for_cpu(tp->pdev,
440 tp->rx_buffers[entry].mapping,
441 pkt_len, PCI_DMA_FROMDEVICE);
442#if ! defined(__alpha__)
David S. Miller8c7b7fa2007-07-10 22:08:12 -0700443 skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
444 pkt_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700445 skb_put(skb, pkt_len);
446#else
Johannes Berg59ae1d12017-06-16 14:29:20 +0200447 skb_put_data(skb,
448 tp->rx_buffers[entry].skb->data,
449 pkt_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700450#endif
451 pci_dma_sync_single_for_device(tp->pdev,
452 tp->rx_buffers[entry].mapping,
453 pkt_len, PCI_DMA_FROMDEVICE);
454 } else { /* Pass up the skb already on the Rx ring. */
455 char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
456 pkt_len);
457
458#ifndef final_version
459 if (tp->rx_buffers[entry].mapping !=
460 le32_to_cpu(tp->rx_ring[entry].buffer1)) {
Joe Perchesabe02af2010-01-28 20:59:22 +0000461 dev_err(&dev->dev,
462 "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %Lx %p / %p\n",
463 le32_to_cpu(tp->rx_ring[entry].buffer1),
464 (long long)tp->rx_buffers[entry].mapping,
465 skb->head, temp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700466 }
467#endif
468
469 pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
470 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
471
472 tp->rx_buffers[entry].skb = NULL;
473 tp->rx_buffers[entry].mapping = 0;
474 }
475 skb->protocol = eth_type_trans(skb, dev);
476
477 netif_rx(skb);
478
Eric Dumazet1a18aba2010-09-03 03:52:43 +0000479 dev->stats.rx_packets++;
480 dev->stats.rx_bytes += pkt_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700481 }
482 received++;
483 entry = (++tp->cur_rx) % RX_RING_SIZE;
484 }
485 return received;
486}
487#endif /* CONFIG_TULIP_NAPI */
488
/*
 * phy_interrupt - service a PHY link-change interrupt (HPPA only).
 * @dev: network device to check.
 *
 * On __hppa__ builds: if the low byte of CSR12 changed since the last
 * pass, acks the interrupt, re-checks duplex under tp->lock, clears the
 * ack bit, and returns 1.  On all other architectures (or when nothing
 * changed) returns 0.
 */
static inline unsigned int phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
	struct tulip_private *tp = netdev_priv(dev);
	int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

	if (csr12 != tp->csr12_shadow) {
		/* ack interrupt */
		iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
		tp->csr12_shadow = csr12;
		/* do link change stuff */
		spin_lock(&tp->lock);
		tulip_check_duplex(dev);
		spin_unlock(&tp->lock);
		/* clear irq ack bit */
		iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

		return 1;
	}
#endif

	return 0;
}
512
513/* The interrupt handler does all of the Rx thread work and cleans up
514 after the Tx thread. */
David Howells7d12e782006-10-05 14:55:46 +0100515irqreturn_t tulip_interrupt(int irq, void *dev_instance)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700516{
517 struct net_device *dev = (struct net_device *)dev_instance;
518 struct tulip_private *tp = netdev_priv(dev);
519 void __iomem *ioaddr = tp->base_addr;
520 int csr5;
521 int missed;
522 int rx = 0;
523 int tx = 0;
524 int oi = 0;
525 int maxrx = RX_RING_SIZE;
526 int maxtx = TX_RING_SIZE;
527 int maxoi = TX_RING_SIZE;
528#ifdef CONFIG_TULIP_NAPI
529 int rxd = 0;
530#else
531 int entry;
532#endif
533 unsigned int work_count = tulip_max_interrupt_work;
534 unsigned int handled = 0;
535
536 /* Let's see whether the interrupt really is for us */
537 csr5 = ioread32(ioaddr + CSR5);
538
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400539 if (tp->flags & HAS_PHY_IRQ)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700540 handled = phy_interrupt (dev);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400541
Linus Torvalds1da177e2005-04-16 15:20:36 -0700542 if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
543 return IRQ_RETVAL(handled);
544
545 tp->nir++;
546
547 do {
548
549#ifdef CONFIG_TULIP_NAPI
550
551 if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
552 rxd++;
553 /* Mask RX intrs and add the device to poll list. */
554 iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
Ben Hutchings288379f2009-01-19 16:43:59 -0800555 napi_schedule(&tp->napi);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400556
Linus Torvalds1da177e2005-04-16 15:20:36 -0700557 if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
558 break;
559 }
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400560
Linus Torvalds1da177e2005-04-16 15:20:36 -0700561 /* Acknowledge the interrupt sources we handle here ASAP
562 the poll function does Rx and RxNoBuf acking */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400563
Linus Torvalds1da177e2005-04-16 15:20:36 -0700564 iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);
565
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400566#else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700567 /* Acknowledge all of the current interrupt sources ASAP. */
568 iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);
569
570
571 if (csr5 & (RxIntr | RxNoBuf)) {
572 rx += tulip_rx(dev);
573 tulip_refill_rx(dev);
574 }
575
576#endif /* CONFIG_TULIP_NAPI */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400577
Linus Torvalds1da177e2005-04-16 15:20:36 -0700578 if (tulip_debug > 4)
Joe Perches726b65a2011-05-09 09:45:22 +0000579 netdev_dbg(dev, "interrupt csr5=%#8.8x new csr5=%#8.8x\n",
580 csr5, ioread32(ioaddr + CSR5));
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400581
Linus Torvalds1da177e2005-04-16 15:20:36 -0700582
583 if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
584 unsigned int dirty_tx;
585
586 spin_lock(&tp->lock);
587
588 for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
589 dirty_tx++) {
590 int entry = dirty_tx % TX_RING_SIZE;
591 int status = le32_to_cpu(tp->tx_ring[entry].status);
592
593 if (status < 0)
594 break; /* It still has not been Txed */
595
596 /* Check for Rx filter setup frames. */
597 if (tp->tx_buffers[entry].skb == NULL) {
598 /* test because dummy frames not mapped */
599 if (tp->tx_buffers[entry].mapping)
600 pci_unmap_single(tp->pdev,
601 tp->tx_buffers[entry].mapping,
602 sizeof(tp->setup_frame),
603 PCI_DMA_TODEVICE);
604 continue;
605 }
606
607 if (status & 0x8000) {
608 /* There was an major error, log it. */
609#ifndef final_version
610 if (tulip_debug > 1)
Joe Perches726b65a2011-05-09 09:45:22 +0000611 netdev_dbg(dev, "Transmit error, Tx status %08x\n",
612 status);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700613#endif
Eric Dumazet1a18aba2010-09-03 03:52:43 +0000614 dev->stats.tx_errors++;
615 if (status & 0x4104)
616 dev->stats.tx_aborted_errors++;
617 if (status & 0x0C00)
618 dev->stats.tx_carrier_errors++;
619 if (status & 0x0200)
620 dev->stats.tx_window_errors++;
621 if (status & 0x0002)
622 dev->stats.tx_fifo_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700623 if ((status & 0x0080) && tp->full_duplex == 0)
Eric Dumazet1a18aba2010-09-03 03:52:43 +0000624 dev->stats.tx_heartbeat_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700625 } else {
Eric Dumazet1a18aba2010-09-03 03:52:43 +0000626 dev->stats.tx_bytes +=
Linus Torvalds1da177e2005-04-16 15:20:36 -0700627 tp->tx_buffers[entry].skb->len;
Eric Dumazet1a18aba2010-09-03 03:52:43 +0000628 dev->stats.collisions += (status >> 3) & 15;
629 dev->stats.tx_packets++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700630 }
631
632 pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
633 tp->tx_buffers[entry].skb->len,
634 PCI_DMA_TODEVICE);
635
636 /* Free the original skb. */
637 dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
638 tp->tx_buffers[entry].skb = NULL;
639 tp->tx_buffers[entry].mapping = 0;
640 tx++;
641 }
642
643#ifndef final_version
644 if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
Joe Perchesabe02af2010-01-28 20:59:22 +0000645 dev_err(&dev->dev,
646 "Out-of-sync dirty pointer, %d vs. %d\n",
647 dirty_tx, tp->cur_tx);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700648 dirty_tx += TX_RING_SIZE;
649 }
650#endif
651
652 if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
653 netif_wake_queue(dev);
654
655 tp->dirty_tx = dirty_tx;
656 if (csr5 & TxDied) {
657 if (tulip_debug > 2)
Joe Perchesabe02af2010-01-28 20:59:22 +0000658 dev_warn(&dev->dev,
659 "The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n",
660 csr5, ioread32(ioaddr + CSR6),
661 tp->csr6);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700662 tulip_restart_rxtx(tp);
663 }
664 spin_unlock(&tp->lock);
665 }
666
667 /* Log errors. */
668 if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */
669 if (csr5 == 0xffffffff)
670 break;
Eric Dumazet1a18aba2010-09-03 03:52:43 +0000671 if (csr5 & TxJabber)
672 dev->stats.tx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700673 if (csr5 & TxFIFOUnderflow) {
674 if ((tp->csr6 & 0xC000) != 0xC000)
675 tp->csr6 += 0x4000; /* Bump up the Tx threshold */
676 else
677 tp->csr6 |= 0x00200000; /* Store-n-forward. */
678 /* Restart the transmit process. */
679 tulip_restart_rxtx(tp);
680 iowrite32(0, ioaddr + CSR1);
681 }
682 if (csr5 & (RxDied | RxNoBuf)) {
683 if (tp->flags & COMET_MAC_ADDR) {
684 iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
685 iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
686 }
687 }
688 if (csr5 & RxDied) { /* Missed a Rx frame. */
Eric Dumazet1a18aba2010-09-03 03:52:43 +0000689 dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
690 dev->stats.rx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700691 tulip_start_rxtx(tp);
692 }
693 /*
694 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
695 * call is ever done under the spinlock
696 */
697 if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
698 if (tp->link_change)
699 (tp->link_change)(dev, csr5);
700 }
Valerie Henson1ddb9862007-03-12 02:31:33 -0700701 if (csr5 & SystemError) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700702 int error = (csr5 >> 23) & 7;
703 /* oops, we hit a PCI error. The code produced corresponds
704 * to the reason:
705 * 0 - parity error
706 * 1 - master abort
707 * 2 - target abort
708 * Note that on parity error, we should do a software reset
709 * of the chip to get it back into a sane state (according
710 * to the 21142/3 docs that is).
711 * -- rmk
712 */
Joe Perchesabe02af2010-01-28 20:59:22 +0000713 dev_err(&dev->dev,
714 "(%lu) System Error occurred (%d)\n",
715 tp->nir, error);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700716 }
717 /* Clear all error sources, included undocumented ones! */
718 iowrite32(0x0800f7ba, ioaddr + CSR5);
719 oi++;
720 }
721 if (csr5 & TimerInt) {
722
723 if (tulip_debug > 2)
Joe Perchesabe02af2010-01-28 20:59:22 +0000724 dev_err(&dev->dev,
725 "Re-enabling interrupts, %08x\n",
726 csr5);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700727 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
728 tp->ttimer = 0;
729 oi++;
730 }
731 if (tx > maxtx || rx > maxrx || oi > maxoi) {
732 if (tulip_debug > 1)
Joe Perchesabe02af2010-01-28 20:59:22 +0000733 dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
734 csr5, tp->nir, tx, rx, oi);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700735
736 /* Acknowledge all interrupt sources. */
737 iowrite32(0x8001ffff, ioaddr + CSR5);
738 if (tp->flags & HAS_INTR_MITIGATION) {
739 /* Josip Loncaric at ICASE did extensive experimentation
740 to develop a good interrupt mitigation setting.*/
741 iowrite32(0x8b240000, ioaddr + CSR11);
742 } else if (tp->chip_id == LC82C168) {
743 /* the LC82C168 doesn't have a hw timer.*/
744 iowrite32(0x00, ioaddr + CSR7);
745 mod_timer(&tp->timer, RUN_AT(HZ/50));
746 } else {
747 /* Mask all interrupting sources, set timer to
748 re-enable. */
749 iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
750 iowrite32(0x0012, ioaddr + CSR11);
751 }
752 break;
753 }
754
755 work_count--;
756 if (work_count == 0)
757 break;
758
759 csr5 = ioread32(ioaddr + CSR5);
760
761#ifdef CONFIG_TULIP_NAPI
762 if (rxd)
763 csr5 &= ~RxPollInt;
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400764 } while ((csr5 & (TxNoBuf |
765 TxDied |
766 TxIntr |
Linus Torvalds1da177e2005-04-16 15:20:36 -0700767 TimerInt |
768 /* Abnormal intr. */
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400769 RxDied |
770 TxFIFOUnderflow |
771 TxJabber |
772 TPLnkFail |
Valerie Henson1ddb9862007-03-12 02:31:33 -0700773 SystemError )) != 0);
Jeff Garzikf3b197a2006-05-26 21:39:03 -0400774#else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700775 } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
776
777 tulip_refill_rx(dev);
778
779 /* check if the card is in suspend mode */
780 entry = tp->dirty_rx % RX_RING_SIZE;
781 if (tp->rx_buffers[entry].skb == NULL) {
782 if (tulip_debug > 1)
Joe Perchesabe02af2010-01-28 20:59:22 +0000783 dev_warn(&dev->dev,
784 "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
785 tp->nir, tp->cur_rx, tp->ttimer, rx);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700786 if (tp->chip_id == LC82C168) {
787 iowrite32(0x00, ioaddr + CSR7);
788 mod_timer(&tp->timer, RUN_AT(HZ/50));
789 } else {
790 if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
791 if (tulip_debug > 1)
Joe Perchesabe02af2010-01-28 20:59:22 +0000792 dev_warn(&dev->dev,
793 "in rx suspend mode: (%lu) set timer\n",
794 tp->nir);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700795 iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
796 ioaddr + CSR7);
797 iowrite32(TimerInt, ioaddr + CSR5);
798 iowrite32(12, ioaddr + CSR11);
799 tp->ttimer = 1;
800 }
801 }
802 }
803#endif /* CONFIG_TULIP_NAPI */
804
805 if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
Eric Dumazet1a18aba2010-09-03 03:52:43 +0000806 dev->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700807 }
808
809 if (tulip_debug > 4)
Joe Perches726b65a2011-05-09 09:45:22 +0000810 netdev_dbg(dev, "exiting interrupt, csr5=%#04x\n",
811 ioread32(ioaddr + CSR5));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700812
813 return IRQ_HANDLED;
814}