/*
 * Copyright (C) 2006-2007 PA Semi, Inc
 *
 * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/dma-mapping.h>
#include <linux/in.h>
#include <linux/skbuff.h>

#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/checksum.h>

#include <asm/irq.h>

#include "pasemi_mac.h"


/* TODO list
 *
 * - Get rid of pci_{read,write}_config(), map registers with ioremap
 *   for performance
 * - PHY support
 * - Multicast support
 * - Large MTU support
 * - Other performance improvements
 */


/* Must be a power of two */
#define RX_RING_SIZE 512
#define TX_RING_SIZE 512

#define TX_DESC(mac, num)	((mac)->tx->desc[(num) & (TX_RING_SIZE-1)])
#define TX_DESC_INFO(mac, num)	((mac)->tx->desc_info[(num) & (TX_RING_SIZE-1)])
#define RX_DESC(mac, num)	((mac)->rx->desc[(num) & (RX_RING_SIZE-1)])
#define RX_DESC_INFO(mac, num)	((mac)->rx->desc_info[(num) & (RX_RING_SIZE-1)])
#define RX_BUFF(mac, num)	((mac)->rx->buffers[(num) & (RX_RING_SIZE-1)])

#define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */

/* XXXOJN these should come out of the device tree some day */
#define PAS_DMA_CAP_BASE	0xe00d0040
#define PAS_DMA_CAP_SIZE	0x100
#define PAS_DMA_COM_BASE	0xe00d0100
#define PAS_DMA_COM_SIZE	0x100

static struct pasdma_status *dma_status;

static int pasemi_get_mac_addr(struct pasemi_mac *mac)
{
	struct pci_dev *pdev = mac->pdev;
	struct device_node *dn = pci_device_to_OF_node(pdev);
	const u8 *maddr;
	u8 addr[6];

	if (!dn) {
		dev_dbg(&pdev->dev,
			"No device node for mac, not configuring\n");
		return -ENOENT;
	}

	maddr = get_property(dn, "mac-address", NULL);
	if (maddr == NULL) {
		dev_warn(&pdev->dev,
			 "no mac address in device tree, not configuring\n");
		return -ENOENT;
	}

	if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
		   &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
		dev_warn(&pdev->dev,
			 "can't parse mac address, not configuring\n");
		return -EINVAL;
	}

	memcpy(mac->mac_addr, addr, sizeof(addr));
	return 0;
}

static int pasemi_mac_setup_rx_resources(struct net_device *dev)
{
	struct pasemi_mac_rxring *ring;
	struct pasemi_mac *mac = netdev_priv(dev);
	int chan_id = mac->dma_rxch;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);

	if (!ring)
		goto out_ring;

	spin_lock_init(&ring->lock);

	ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
				  RX_RING_SIZE, GFP_KERNEL);

	if (!ring->desc_info)
		goto out_desc_info;

	/* Allocate descriptors */
	ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev,
					RX_RING_SIZE *
					sizeof(struct pas_dma_xct_descr),
					&ring->dma, GFP_KERNEL);

	if (!ring->desc)
		goto out_desc;

	memset(ring->desc, 0, RX_RING_SIZE * sizeof(struct pas_dma_xct_descr));

	ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
					   RX_RING_SIZE * sizeof(u64),
					   &ring->buf_dma, GFP_KERNEL);
	if (!ring->buffers)
		goto out_buffers;

	memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));

	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEL(chan_id),
			       PAS_DMA_RXCHAN_BASEL_BRBL(ring->dma));

	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEU(chan_id),
			       PAS_DMA_RXCHAN_BASEU_BRBH(ring->dma >> 32) |
			       PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 2));

	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_CFG(chan_id),
			       PAS_DMA_RXCHAN_CFG_HBU(1));

	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEL(mac->dma_if),
			       PAS_DMA_RXINT_BASEL_BRBL(__pa(ring->buffers)));

	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEU(mac->dma_if),
			       PAS_DMA_RXINT_BASEU_BRBH(__pa(ring->buffers) >> 32) |
			       PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3));

	ring->next_to_fill = 0;
	ring->next_to_clean = 0;

	snprintf(ring->irq_name, sizeof(ring->irq_name),
		 "%s rx", dev->name);
	mac->rx = ring;

	return 0;

out_buffers:
	/* mac->rx has not been set yet on this path, so free via the
	 * local ring pointer rather than mac->rx.
	 */
	dma_free_coherent(&mac->dma_pdev->dev,
			  RX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
			  ring->desc, ring->dma);
out_desc:
	kfree(ring->desc_info);
out_desc_info:
	kfree(ring);
out_ring:
	return -ENOMEM;
}


static int pasemi_mac_setup_tx_resources(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	u32 val;
	int chan_id = mac->dma_txch;
	struct pasemi_mac_txring *ring;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out_ring;

	spin_lock_init(&ring->lock);

	ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
				  TX_RING_SIZE, GFP_KERNEL);
	if (!ring->desc_info)
		goto out_desc_info;

	/* Allocate descriptors */
	ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev,
					TX_RING_SIZE *
					sizeof(struct pas_dma_xct_descr),
					&ring->dma, GFP_KERNEL);
	if (!ring->desc)
		goto out_desc;

	memset(ring->desc, 0, TX_RING_SIZE * sizeof(struct pas_dma_xct_descr));

	pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEL(chan_id),
			       PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
	val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2);

	pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEU(chan_id), val);

	pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_CFG(chan_id),
			       PAS_DMA_TXCHAN_CFG_TY_IFACE |
			       PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
			       PAS_DMA_TXCHAN_CFG_UP |
			       PAS_DMA_TXCHAN_CFG_WT(2));

	ring->next_to_use = 0;
	ring->next_to_clean = 0;

	snprintf(ring->irq_name, sizeof(ring->irq_name),
		 "%s tx", dev->name);
	mac->tx = ring;

	return 0;

out_desc:
	kfree(ring->desc_info);
out_desc_info:
	kfree(ring);
out_ring:
	return -ENOMEM;
}

static void pasemi_mac_free_tx_resources(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int i;
	struct pasemi_mac_buffer *info;
	struct pas_dma_xct_descr *dp;

	for (i = 0; i < TX_RING_SIZE; i++) {
		info = &TX_DESC_INFO(mac, i);
		dp = &TX_DESC(mac, i);
		if (info->dma) {
			if (info->skb) {
				pci_unmap_single(mac->dma_pdev,
						 info->dma,
						 info->skb->len,
						 PCI_DMA_TODEVICE);
				dev_kfree_skb_any(info->skb);
			}
			info->dma = 0;
			info->skb = NULL;
			dp->mactx = 0;
			dp->ptr = 0;
		}
	}

	dma_free_coherent(&mac->dma_pdev->dev,
			  TX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
			  mac->tx->desc, mac->tx->dma);

	kfree(mac->tx->desc_info);
	kfree(mac->tx);
	mac->tx = NULL;
}

static void pasemi_mac_free_rx_resources(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int i;
	struct pasemi_mac_buffer *info;
	struct pas_dma_xct_descr *dp;

	for (i = 0; i < RX_RING_SIZE; i++) {
		info = &RX_DESC_INFO(mac, i);
		dp = &RX_DESC(mac, i);
		if (info->dma) {
			if (info->skb) {
				/* Rx buffers are mapped at BUF_SIZE, so
				 * unmap with the same length.
				 */
				pci_unmap_single(mac->dma_pdev,
						 info->dma,
						 BUF_SIZE,
						 PCI_DMA_FROMDEVICE);
				dev_kfree_skb_any(info->skb);
			}
			info->dma = 0;
			info->skb = NULL;
			dp->macrx = 0;
			dp->ptr = 0;
		}
	}

	dma_free_coherent(&mac->dma_pdev->dev,
			  RX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
			  mac->rx->desc, mac->rx->dma);

	dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
			  mac->rx->buffers, mac->rx->buf_dma);

	kfree(mac->rx->desc_info);
	kfree(mac->rx);
	mac->rx = NULL;
}

static void pasemi_mac_replenish_rx_ring(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int i;
	int start = mac->rx->next_to_fill;
	unsigned int count;

	count = (mac->rx->next_to_clean + RX_RING_SIZE -
		 mac->rx->next_to_fill) & (RX_RING_SIZE - 1);

	/* Check to see if we're doing first-time setup */
	if (unlikely(mac->rx->next_to_clean == 0 && mac->rx->next_to_fill == 0))
		count = RX_RING_SIZE;

	if (count <= 0)
		return;

	for (i = start; i < start + count; i++) {
		struct pasemi_mac_buffer *info = &RX_DESC_INFO(mac, i);
		u64 *buff = &RX_BUFF(mac, i);
		struct sk_buff *skb;
		dma_addr_t dma;

		skb = dev_alloc_skb(BUF_SIZE);

		if (!skb) {
			count = i - start;
			break;
		}

		/* Map the full buffer; skb->len is still 0 for a freshly
		 * allocated skb, and the hardware may write up to BUF_SIZE.
		 */
		dma = pci_map_single(mac->dma_pdev, skb->data, BUF_SIZE,
				     PCI_DMA_FROMDEVICE);

		if (dma_mapping_error(dma)) {
			/* Free the skb we just allocated but failed to map */
			dev_kfree_skb_irq(skb);
			count = i - start;
			break;
		}

		info->skb = skb;
		info->dma = dma;
		*buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);
	}

	wmb();

	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_RXCHAN_INCR(mac->dma_rxch),
			       count);
	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_RXINT_INCR(mac->dma_if),
			       count);

	mac->rx->next_to_fill += count;
}

static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
{
	unsigned int i;
	int start, count;

	spin_lock(&mac->rx->lock);

	start = mac->rx->next_to_clean;
	count = 0;

	for (i = start; i < (start + RX_RING_SIZE) && count < limit; i++) {
		struct pas_dma_xct_descr *dp;
		struct pasemi_mac_buffer *info;
		struct sk_buff *skb;
		unsigned int j, len;
		dma_addr_t dma;

		rmb();

		dp = &RX_DESC(mac, i);

		if (!(dp->macrx & XCT_MACRX_O))
			break;

		count++;

		info = NULL;

		/* We have to scan for our skb since there's no way
		 * to back-map them from the descriptor, and if we
		 * have several receive channels then they might not
		 * show up in the same order as they were put on the
		 * interface ring.
		 */

		dma = (dp->ptr & XCT_PTR_ADDR_M);
		for (j = start; j < (start + RX_RING_SIZE); j++) {
			info = &RX_DESC_INFO(mac, j);
			if (info->dma == dma)
				break;
		}

		BUG_ON(!info);
		BUG_ON(info->dma != dma);

		/* Unmap the full buffer length; the skb length is still 0
		 * here since skb_put() hasn't run yet.
		 */
		pci_unmap_single(mac->dma_pdev, info->dma, BUF_SIZE,
				 PCI_DMA_FROMDEVICE);

		skb = info->skb;

		len = (dp->macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;

		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, mac->netdev);

		if ((dp->macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (dp->macrx & XCT_MACRX_CSUM_M) >>
				    XCT_MACRX_CSUM_S;
		} else
			skb->ip_summed = CHECKSUM_NONE;

		mac->stats.rx_bytes += len;
		mac->stats.rx_packets++;

		netif_receive_skb(skb);

		info->dma = 0;
		info->skb = NULL;
		dp->ptr = 0;
		dp->macrx = 0;
	}

	mac->rx->next_to_clean += count;
	pasemi_mac_replenish_rx_ring(mac->netdev);

	spin_unlock(&mac->rx->lock);

	return count;
}

static int pasemi_mac_clean_tx(struct pasemi_mac *mac)
{
	int i;
	struct pasemi_mac_buffer *info;
	struct pas_dma_xct_descr *dp;
	int start, count;
	unsigned long flags;

	spin_lock_irqsave(&mac->tx->lock, flags);

	start = mac->tx->next_to_clean;
	count = 0;

	for (i = start; i < mac->tx->next_to_use; i++) {
		dp = &TX_DESC(mac, i);
		if (!dp || (dp->mactx & XCT_MACTX_O))
			break;

		count++;

		info = &TX_DESC_INFO(mac, i);

		pci_unmap_single(mac->dma_pdev, info->dma,
				 info->skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(info->skb);

		info->skb = NULL;
		info->dma = 0;
		dp->mactx = 0;
		dp->ptr = 0;
	}
	mac->tx->next_to_clean += count;
	spin_unlock_irqrestore(&mac->tx->lock, flags);

	netif_wake_queue(mac->netdev);

	return count;
}

static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
{
	struct net_device *dev = data;
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int reg;

	if (!(*mac->rx_status & PAS_STATUS_INT))
		return IRQ_NONE;

	netif_rx_schedule(dev);
	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
			       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0));

	reg = PAS_IOB_DMA_RXCH_RESET_PINTC | PAS_IOB_DMA_RXCH_RESET_SINTC |
	      PAS_IOB_DMA_RXCH_RESET_DINTC;
	if (*mac->rx_status & PAS_STATUS_TIMER)
		reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;

	pci_write_config_dword(mac->iob_pdev,
			       PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg);

	return IRQ_HANDLED;
}

static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
{
	struct net_device *dev = data;
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int reg;

	if (!(*mac->tx_status & PAS_STATUS_INT))
		return IRQ_NONE;

	pasemi_mac_clean_tx(mac);

	reg = PAS_IOB_DMA_TXCH_RESET_PINTC | PAS_IOB_DMA_TXCH_RESET_SINTC;
	if (*mac->tx_status & PAS_STATUS_TIMER)
		reg |= PAS_IOB_DMA_TXCH_RESET_TINTC;

	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_TXCH_RESET(mac->dma_txch),
			       reg);

	return IRQ_HANDLED;
}

static int pasemi_mac_open(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	int base_irq;
	unsigned int flags;
	int ret;

	/* enable rx section */
	pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_RXCMD,
			       PAS_DMA_COM_RXCMD_EN);

	/* enable tx section */
	pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_TXCMD,
			       PAS_DMA_COM_TXCMD_EN);

	flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
		PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
		PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12);

	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_TXP, flags);

	flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE |
		PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;

	flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;

	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_RXCH_CFG(mac->dma_rxch),
			       PAS_IOB_DMA_RXCH_CFG_CNTTH(30));

	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
			       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(1000000));

	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);

	ret = pasemi_mac_setup_rx_resources(dev);
	if (ret)
		goto out_rx_resources;

	ret = pasemi_mac_setup_tx_resources(dev);
	if (ret)
		goto out_tx_resources;

	pci_write_config_dword(mac->pdev, PAS_MAC_IPC_CHNL,
			       PAS_MAC_IPC_CHNL_DCHNO(mac->dma_rxch) |
			       PAS_MAC_IPC_CHNL_BCH(mac->dma_rxch));

	/* enable rx if */
	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
			       PAS_DMA_RXINT_RCMDSTA_EN);

	/* enable rx channel */
	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
			       PAS_DMA_RXCHAN_CCMDSTA_EN |
			       PAS_DMA_RXCHAN_CCMDSTA_DU);

	/* enable tx channel */
	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
			       PAS_DMA_TXCHAN_TCMDSTA_EN);

	pasemi_mac_replenish_rx_ring(dev);

	netif_start_queue(dev);
	netif_poll_enable(dev);

	/* Interrupts are a bit different for our DMA controller: while
	 * it has a regular PCI device header, the interrupt there is
	 * really the base of the interrupt range it uses. Each tx and
	 * rx channel has its own interrupt source.
	 */

	base_irq = virq_to_hw(mac->dma_pdev->irq);

	mac->tx_irq = irq_create_mapping(NULL, base_irq + mac->dma_txch);
	mac->rx_irq = irq_create_mapping(NULL, base_irq + 20 + mac->dma_rxch);

	ret = request_irq(mac->tx_irq, &pasemi_mac_tx_intr, IRQF_DISABLED,
			  mac->tx->irq_name, dev);
	if (ret) {
		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
			base_irq + mac->dma_txch, ret);
		goto out_tx_int;
	}

	ret = request_irq(mac->rx_irq, &pasemi_mac_rx_intr, IRQF_DISABLED,
			  mac->rx->irq_name, dev);
	if (ret) {
		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
			base_irq + 20 + mac->dma_rxch, ret);
		goto out_rx_int;
	}

	return 0;

out_rx_int:
	free_irq(mac->tx_irq, dev);
out_tx_int:
	netif_poll_disable(dev);
	netif_stop_queue(dev);
	pasemi_mac_free_tx_resources(dev);
out_tx_resources:
	pasemi_mac_free_rx_resources(dev);
out_rx_resources:

	return ret;
}

#define MAX_RETRIES 5000

static int pasemi_mac_close(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int stat;
	int retries;

	netif_stop_queue(dev);

	/* Clean out any pending buffers */
	pasemi_mac_clean_tx(mac);
	pasemi_mac_clean_rx(mac, RX_RING_SIZE);

	/* Disable interface */
	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
			       PAS_DMA_TXCHAN_TCMDSTA_ST);
	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
			       PAS_DMA_RXINT_RCMDSTA_ST);
	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
			       PAS_DMA_RXCHAN_CCMDSTA_ST);

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		pci_read_config_dword(mac->dma_pdev,
				      PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
				      &stat);
		if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n");

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		pci_read_config_dword(mac->dma_pdev,
				      PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
				      &stat);
		if (!(stat & PAS_DMA_RXCHAN_CCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (stat & PAS_DMA_RXCHAN_CCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n");

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		pci_read_config_dword(mac->dma_pdev,
				      PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
				      &stat);
		if (!(stat & PAS_DMA_RXINT_RCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (stat & PAS_DMA_RXINT_RCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n");

	/* Then, disable the channel. This must be done separately from
	 * stopping, since you can't disable when active.
	 */

	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), 0);
	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), 0);
	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);

	free_irq(mac->tx_irq, dev);
	free_irq(mac->rx_irq, dev);

	/* Free resources */
	pasemi_mac_free_rx_resources(dev);
	pasemi_mac_free_tx_resources(dev);

	return 0;
}

static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	struct pasemi_mac_txring *txring;
	struct pasemi_mac_buffer *info;
	struct pas_dma_xct_descr *dp;
	u64 dflags;
	dma_addr_t map;
	unsigned long flags;

	dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_SS | XCT_MACTX_CRC_PAD;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const unsigned char *nh = skb_network_header(skb);

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_TCP:
			dflags |= XCT_MACTX_CSUM_TCP;
			dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
			dflags |= XCT_MACTX_IPO(nh - skb->data);
			break;
		case IPPROTO_UDP:
			dflags |= XCT_MACTX_CSUM_UDP;
			dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
			dflags |= XCT_MACTX_IPO(nh - skb->data);
			break;
		}
	}

	map = pci_map_single(mac->dma_pdev, skb->data, skb->len, PCI_DMA_TODEVICE);

	if (dma_mapping_error(map))
		return NETDEV_TX_BUSY;

	txring = mac->tx;

	spin_lock_irqsave(&txring->lock, flags);

	/* The ring is full when all TX_RING_SIZE descriptors are outstanding */
	if (txring->next_to_use - txring->next_to_clean == TX_RING_SIZE) {
		spin_unlock_irqrestore(&txring->lock, flags);
		pasemi_mac_clean_tx(mac);
		spin_lock_irqsave(&txring->lock, flags);

		if (txring->next_to_use - txring->next_to_clean ==
		    TX_RING_SIZE) {
			/* Still no room -- stop the queue and wait for tx
			 * intr when there's room.
			 */
			netif_stop_queue(dev);
			goto out_err;
		}
	}

	dp = &TX_DESC(mac, txring->next_to_use);
	info = &TX_DESC_INFO(mac, txring->next_to_use);

	dp->mactx = dflags | XCT_MACTX_LLEN(skb->len);
	dp->ptr = XCT_PTR_LEN(skb->len) | XCT_PTR_ADDR(map);
	info->dma = map;
	info->skb = skb;

	txring->next_to_use++;
	mac->stats.tx_packets++;
	mac->stats.tx_bytes += skb->len;

	spin_unlock_irqrestore(&txring->lock, flags);

	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_TXCHAN_INCR(mac->dma_txch), 1);

	return NETDEV_TX_OK;

out_err:
	spin_unlock_irqrestore(&txring->lock, flags);
	pci_unmap_single(mac->dma_pdev, map, skb->len, PCI_DMA_TODEVICE);
	return NETDEV_TX_BUSY;
}

static struct net_device_stats *pasemi_mac_get_stats(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);

	return &mac->stats;
}

static void pasemi_mac_set_rx_mode(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int flags;

	pci_read_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, &flags);

	/* Set promiscuous */
	if (dev->flags & IFF_PROMISC)
		flags |= PAS_MAC_CFG_PCFG_PR;
	else
		flags &= ~PAS_MAC_CFG_PCFG_PR;

	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);
}


static int pasemi_mac_poll(struct net_device *dev, int *budget)
{
	int pkts, limit = min(*budget, dev->quota);
	struct pasemi_mac *mac = netdev_priv(dev);

	pkts = pasemi_mac_clean_rx(mac, limit);

	if (pkts < limit) {
		/* all done, no more packets present */
		netif_rx_complete(dev);

		/* re-enable receive interrupts */
		pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
				       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(1000000));
		return 0;
	} else {
		/* used up our quantum, so reschedule */
		dev->quota -= pkts;
		*budget -= pkts;
		return 1;
	}
}

static int __devinit
pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int index = 0;
	struct net_device *dev;
	struct pasemi_mac *mac;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	dev = alloc_etherdev(sizeof(struct pasemi_mac));
	if (dev == NULL) {
		dev_err(&pdev->dev,
			"pasemi_mac: Could not allocate ethernet device.\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	SET_MODULE_OWNER(dev);
	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	mac = netdev_priv(dev);

	mac->pdev = pdev;
	mac->netdev = dev;
	mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);

	if (!mac->dma_pdev) {
		dev_err(&pdev->dev, "Can't find DMA Controller\n");
		err = -ENODEV;
		goto out_free_netdev;
	}

	mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);

	if (!mac->iob_pdev) {
		dev_err(&pdev->dev, "Can't find I/O Bridge\n");
		err = -ENODEV;
		goto out_put_dma_pdev;
	}

	/* These should come out of the device tree eventually */
	mac->dma_txch = index;
	mac->dma_rxch = index;

	/* We probe GMAC before XAUI, but the DMA interfaces are
	 * in XAUI, GMAC order.
	 */
	if (index < 4)
		mac->dma_if = index + 2;
	else
		mac->dma_if = index - 4;
	index++;

	switch (pdev->device) {
	case 0xa005:
		mac->type = MAC_TYPE_GMAC;
		break;
	case 0xa006:
		mac->type = MAC_TYPE_XAUI;
		break;
	default:
		err = -ENODEV;
		goto out;
	}

	/* get mac addr from device tree */
	if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) {
		err = -ENODEV;
		goto out;
	}
	memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));

	dev->open = pasemi_mac_open;
	dev->stop = pasemi_mac_close;
	dev->hard_start_xmit = pasemi_mac_start_tx;
	dev->get_stats = pasemi_mac_get_stats;
	dev->set_multicast_list = pasemi_mac_set_rx_mode;
	dev->weight = 64;
	dev->poll = pasemi_mac_poll;
	dev->features = NETIF_F_HW_CSUM;

	/* The dma status structure is located in the I/O bridge, and
	 * is cache coherent.
	 */
	if (!dma_status)
		/* XXXOJN This should come from the device tree */
		dma_status = __ioremap(0xfd800000, 0x1000, 0);

	mac->rx_status = &dma_status->rx_sta[mac->dma_rxch];
	mac->tx_status = &dma_status->tx_sta[mac->dma_txch];

	err = register_netdev(dev);

	if (err) {
		dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n",
			err);
		goto out;
	} else
		printk(KERN_INFO "%s: PA Semi %s: intf %d, txch %d, rxch %d, "
		       "hw addr %02x:%02x:%02x:%02x:%02x:%02x\n",
		       dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
		       mac->dma_if, mac->dma_txch, mac->dma_rxch,
		       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
		       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);

	return err;

out:
	pci_dev_put(mac->iob_pdev);
out_put_dma_pdev:
	pci_dev_put(mac->dma_pdev);
out_free_netdev:
	free_netdev(dev);
out_disable_device:
	pci_disable_device(pdev);
	return err;

}

static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pasemi_mac *mac;

	if (!netdev)
		return;

	mac = netdev_priv(netdev);

	unregister_netdev(netdev);

	pci_disable_device(pdev);
	pci_dev_put(mac->dma_pdev);
	pci_dev_put(mac->iob_pdev);

	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
}

static struct pci_device_id pasemi_mac_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
	{ },	/* terminating entry; the PCI core stops at a zeroed slot */
};

MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);

static struct pci_driver pasemi_mac_driver = {
	.name		= "pasemi_mac",
	.id_table	= pasemi_mac_pci_tbl,
	.probe		= pasemi_mac_probe,
	.remove		= __devexit_p(pasemi_mac_remove),
};

static void __exit pasemi_mac_cleanup_module(void)
{
	pci_unregister_driver(&pasemi_mac_driver);
	__iounmap(dma_status);
	dma_status = NULL;
}

static int __init pasemi_mac_init_module(void)
{
	return pci_register_driver(&pasemi_mac_driver);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Olof Johansson <olof@lixom.net>");
MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");

module_init(pasemi_mac_init_module);
module_exit(pasemi_mac_cleanup_module);