/*
 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
 *
 * Copyright 2008 JMicron Technology Corporation
 * http://www.jmicron.com/
 *
 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>
#include "jme.h"

static int force_pseudohp = -1;
static int no_pseudohp = -1;
static int no_extplug = -1;
module_param(force_pseudohp, int, 0);
MODULE_PARM_DESC(force_pseudohp,
	"Enable pseudo hot-plug feature manually by driver instead of BIOS.");
module_param(no_pseudohp, int, 0);
MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature.");
module_param(no_extplug, int, 0);
MODULE_PARM_DESC(no_extplug,
	"Do not use external plug signal for pseudo hot-plug.");

static int
jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, val, again = (reg == MII_BMSR) ? 1 : 0;

read_again:
	jwrite32(jme, JME_SMI, SMI_OP_REQ |
		smi_phy_addr(phy) |
		smi_reg_addr(reg));

	wmb();
	for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
		udelay(20);
		val = jread32(jme, JME_SMI);
		if ((val & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0) {
		jeprintk(jme->pdev, "phy(%d) read timeout : %d\n", phy, reg);
		return 0;
	}

	if (again--)
		goto read_again;

	return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT;
}
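
/*
 * SMI access pattern: a read request is posted to JME_SMI, then the
 * driver polls until the hardware clears SMI_OP_REQ (up to
 * JME_PHY_TIMEOUT * 50 iterations of 20us each). MII_BMSR is read
 * twice (the "again" flag), a common MII idiom: the latched
 * link-status bit in BMSR only reflects the current state on the
 * second read.
 */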

static void
jme_mdio_write(struct net_device *netdev,
		int phy, int reg, int val)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i;

	jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
		((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
		smi_phy_addr(phy) | smi_reg_addr(reg));

	wmb();
	for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
		udelay(20);
		if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0)
		jeprintk(jme->pdev, "phy(%d) write timeout : %d\n", phy, reg);

	return;
}

static inline void
jme_reset_phy_processor(struct jme_adapter *jme)
{
	u32 val;

	jme_mdio_write(jme->dev,
			jme->mii_if.phy_id,
			MII_ADVERTISE, ADVERTISE_ALL |
			ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

	if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
		jme_mdio_write(jme->dev,
				jme->mii_if.phy_id,
				MII_CTRL1000,
				ADVERTISE_1000FULL | ADVERTISE_1000HALF);

	val = jme_mdio_read(jme->dev,
				jme->mii_if.phy_id,
				MII_BMCR);

	jme_mdio_write(jme->dev,
			jme->mii_if.phy_id,
			MII_BMCR, val | BMCR_RESET);

	return;
}

static void
jme_setup_wakeup_frame(struct jme_adapter *jme,
			u32 *mask, u32 crc, int fnr)
{
	int i;

	/*
	 * Setup CRC pattern
	 */
	jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
	wmb();
	jwrite32(jme, JME_WFODP, crc);
	wmb();

	/*
	 * Setup Mask
	 */
	for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) {
		jwrite32(jme, JME_WFOI,
				((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
				(fnr & WFOI_FRAME_SEL));
		wmb();
		jwrite32(jme, JME_WFODP, mask[i]);
		wmb();
	}
}
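
/*
 * JME_WFOI appears to act as an index register and JME_WFODP as its
 * data port: jme_setup_wakeup_frame() first stores the CRC of a
 * wake-on-LAN pattern for frame slot fnr, then the per-dword byte
 * masks selecting which bytes of an incoming frame feed that CRC.
 * The wmb() between index and data writes keeps the two MMIO stores
 * ordered.
 */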

static inline void
jme_reset_mac_processor(struct jme_adapter *jme)
{
	u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
	u32 crc = 0xCDCDCDCD;
	u32 gpreg0;
	int i;

	jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
	udelay(2);
	jwrite32(jme, JME_GHC, jme->reg_ghc);

	jwrite32(jme, JME_RXDBA_LO, 0x00000000);
	jwrite32(jme, JME_RXDBA_HI, 0x00000000);
	jwrite32(jme, JME_RXQDC, 0x00000000);
	jwrite32(jme, JME_RXNDA, 0x00000000);
	jwrite32(jme, JME_TXDBA_LO, 0x00000000);
	jwrite32(jme, JME_TXDBA_HI, 0x00000000);
	jwrite32(jme, JME_TXQDC, 0x00000000);
	jwrite32(jme, JME_TXNDA, 0x00000000);

	jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
	jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
	for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
		jme_setup_wakeup_frame(jme, mask, crc, i);
	if (jme->fpgaver)
		gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL;
	else
		gpreg0 = GPREG0_DEFAULT;
	jwrite32(jme, JME_GPREG0, gpreg0);
	jwrite32(jme, JME_GPREG1, GPREG1_DEFAULT);
}

static inline void
jme_reset_ghc_speed(struct jme_adapter *jme)
{
	jme->reg_ghc &= ~(GHC_SPEED_1000M | GHC_DPX);
	jwrite32(jme, JME_GHC, jme->reg_ghc);
}

static inline void
jme_clear_pm(struct jme_adapter *jme)
{
	jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
	pci_set_power_state(jme->pdev, PCI_D0);
	pci_enable_wake(jme->pdev, PCI_D0, false);
}

static int
jme_reload_eeprom(struct jme_adapter *jme)
{
	u32 val;
	int i;

	val = jread32(jme, JME_SMBCSR);

	if (val & SMBCSR_EEPROMD) {
		val |= SMBCSR_CNACK;
		jwrite32(jme, JME_SMBCSR, val);
		val |= SMBCSR_RELOAD;
		jwrite32(jme, JME_SMBCSR, val);
		mdelay(12);

		for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) {
			mdelay(1);
			if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
				break;
		}

		if (i == 0) {
			jeprintk(jme->pdev, "eeprom reload timeout\n");
			return -EIO;
		}
	}

	return 0;
}

static void
jme_load_macaddr(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	unsigned char macaddr[6];
	u32 val;

	spin_lock_bh(&jme->macaddr_lock);
	val = jread32(jme, JME_RXUMA_LO);
	macaddr[0] = (val >>  0) & 0xFF;
	macaddr[1] = (val >>  8) & 0xFF;
	macaddr[2] = (val >> 16) & 0xFF;
	macaddr[3] = (val >> 24) & 0xFF;
	val = jread32(jme, JME_RXUMA_HI);
	macaddr[4] = (val >>  0) & 0xFF;
	macaddr[5] = (val >>  8) & 0xFF;
	memcpy(netdev->dev_addr, macaddr, 6);
	spin_unlock_bh(&jme->macaddr_lock);
}
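
/*
 * The unicast address registers are byte-packed little-endian. With
 * illustrative values (not from the source), a station address
 * 00:1B:2C:3D:4E:5F would read back as
 *   JME_RXUMA_LO = 0x3D2C1B00  (bytes 0..3)
 *   JME_RXUMA_HI = 0x00005F4E  (bytes 4..5)
 * jme_set_macaddr() below performs the inverse packing on write.
 */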

static inline void
jme_set_rx_pcc(struct jme_adapter *jme, int p)
{
	switch (p) {
	case PCC_OFF:
		jwrite32(jme, JME_PCCRX0,
			((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P1:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P2:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P3:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	default:
		break;
	}
	wmb();

	if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
		msg_rx_status(jme, "Switched to PCC_P%d\n", p);
}
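
/*
 * PCC is the hardware's interrupt coalescing: each PCC_Px level pairs
 * a timeout (PCC_Px_TO) with a packet-count threshold (PCC_Px_CNT),
 * and the NIC raises an RX interrupt when either is reached. Higher
 * levels batch more packets per interrupt, trading latency for lower
 * interrupt load; PCC_OFF effectively disables coalescing for NAPI
 * polling mode.
 */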

static void
jme_start_irq(struct jme_adapter *jme)
{
	register struct dynpcc_info *dpi = &(jme->dpi);

	jme_set_rx_pcc(jme, PCC_P1);
	dpi->cur = PCC_P1;
	dpi->attempt = PCC_P1;
	dpi->cnt = 0;

	jwrite32(jme, JME_PCCTX,
			((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
			((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
			PCCTXQ0_EN
		);

	/*
	 * Enable Interrupts
	 */
	jwrite32(jme, JME_IENS, INTR_ENABLE);
}

static inline void
jme_stop_irq(struct jme_adapter *jme)
{
	/*
	 * Disable Interrupts
	 */
	jwrite32f(jme, JME_IENC, INTR_ENABLE);
}

static inline void
jme_enable_shadow(struct jme_adapter *jme)
{
	jwrite32(jme,
		 JME_SHBA_LO,
		 ((u32)jme->shadow_dma & ~((u32)0x1F)) | SHBA_POSTEN);
}

static inline void
jme_disable_shadow(struct jme_adapter *jme)
{
	jwrite32(jme, JME_SHBA_LO, 0x0);
}

static u32
jme_linkstat_from_phy(struct jme_adapter *jme)
{
	u32 phylink, bmsr;

	phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17);
	bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR);
	if (bmsr & BMSR_ANCOMP)
		phylink |= PHY_LINK_AUTONEG_COMPLETE;

	return phylink;
}
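
/*
 * Register 17 is presumably a vendor-specific PHY status register
 * (hence the bare numeral instead of a linux/mii.h constant). On FPGA
 * evaluation parts the MAC's JME_PHY_LINK register is not usable, so
 * link state is assembled from the PHY directly, with autonegotiation
 * completion folded in from the standard BMSR.
 */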

static inline void
jme_set_phyfifoa(struct jme_adapter *jme)
{
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
}

static inline void
jme_set_phyfifob(struct jme_adapter *jme)
{
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
}

static int
jme_check_link(struct net_device *netdev, int testonly)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr, gpreg1;
	char linkmsg[64];
	int rc = 0;

	linkmsg[0] = '\0';

	if (jme->fpgaver)
		phylink = jme_linkstat_from_phy(jme);
	else
		phylink = jread32(jme, JME_PHY_LINK);

	if (phylink & PHY_LINK_UP) {
		if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
			/*
			 * If autonegotiation was not enabled,
			 * speed/duplex info must be obtained over SMI
			 */
			phylink = PHY_LINK_UP;

			bmcr = jme_mdio_read(jme->dev,
						jme->mii_if.phy_id,
						MII_BMCR);

			phylink |= ((bmcr & BMCR_SPEED1000) &&
					(bmcr & BMCR_SPEED100) == 0) ?
					PHY_LINK_SPEED_1000M :
					(bmcr & BMCR_SPEED100) ?
					PHY_LINK_SPEED_100M :
					PHY_LINK_SPEED_10M;

			phylink |= (bmcr & BMCR_FULLDPLX) ?
					PHY_LINK_DUPLEX : 0;

			strcat(linkmsg, "Forced: ");
		} else {
			/*
			 * Keep polling until speed/duplex resolution
			 * completes
			 */
			while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
				--cnt) {

				udelay(1);

				if (jme->fpgaver)
					phylink = jme_linkstat_from_phy(jme);
				else
					phylink = jread32(jme, JME_PHY_LINK);
			}
			if (!cnt)
				jeprintk(jme->pdev,
					"Timed out waiting for speed resolution.\n");

			strcat(linkmsg, "ANed: ");
		}

		if (jme->phylink == phylink) {
			rc = 1;
			goto out;
		}
		if (testonly)
			goto out;

		jme->phylink = phylink;

		ghc = jme->reg_ghc & ~(GHC_SPEED | GHC_DPX |
				GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE |
				GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY);
		switch (phylink & PHY_LINK_SPEED_MASK) {
		case PHY_LINK_SPEED_10M:
			ghc |= GHC_SPEED_10M |
				GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
			strcat(linkmsg, "10 Mbps, ");
			break;
		case PHY_LINK_SPEED_100M:
			ghc |= GHC_SPEED_100M |
				GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
			strcat(linkmsg, "100 Mbps, ");
			break;
		case PHY_LINK_SPEED_1000M:
			ghc |= GHC_SPEED_1000M |
				GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
			strcat(linkmsg, "1000 Mbps, ");
			break;
		default:
			break;
		}

		if (phylink & PHY_LINK_DUPLEX) {
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
			ghc |= GHC_DPX;
		} else {
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
						TXMCS_BACKOFF |
						TXMCS_CARRIERSENSE |
						TXMCS_COLLISION);
			jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
				((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
				TXTRHD_TXREN |
				((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
		}

		gpreg1 = GPREG1_DEFAULT;
		if (is_buggy250(jme->pdev->device, jme->chiprev)) {
			if (!(phylink & PHY_LINK_DUPLEX))
				gpreg1 |= GPREG1_HALFMODEPATCH;
			switch (phylink & PHY_LINK_SPEED_MASK) {
			case PHY_LINK_SPEED_10M:
				jme_set_phyfifoa(jme);
				gpreg1 |= GPREG1_RSSPATCH;
				break;
			case PHY_LINK_SPEED_100M:
				jme_set_phyfifob(jme);
				gpreg1 |= GPREG1_RSSPATCH;
				break;
			case PHY_LINK_SPEED_1000M:
				jme_set_phyfifoa(jme);
				break;
			default:
				break;
			}
		}

		jwrite32(jme, JME_GPREG1, gpreg1);
		jwrite32(jme, JME_GHC, ghc);
		jme->reg_ghc = ghc;

		strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
					"Full-Duplex, " :
					"Half-Duplex, ");
		strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ?
					"MDI-X" :
					"MDI");
		msg_link(jme, "Link is up at %s.\n", linkmsg);
		netif_carrier_on(netdev);
	} else {
		if (testonly)
			goto out;

		msg_link(jme, "Link is down.\n");
		jme->phylink = 0;
		netif_carrier_off(netdev);
	}

out:
	return rc;
}
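
/*
 * In summary, once link parameters are known jme_check_link() programs
 * three things together: the speed/duplex bits in GHC, the GHC clock
 * sources (PCIe-derived clocks for 10/100, GPHY-derived for gigabit),
 * and the GPREG1 workarounds plus PHY FIFO tuning that is_buggy250()
 * selects for early JMC250 revisions.
 */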

static int
jme_setup_tx_resources(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);

	txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
				   TX_RING_ALLOC_SIZE(jme->tx_ring_size),
				   &(txring->dmaalloc),
				   GFP_ATOMIC);

	if (!txring->alloc)
		goto err_set_null;

	/*
	 * 16 Bytes align
	 */
	txring->desc = (void *)ALIGN((unsigned long)(txring->alloc),
				     RING_DESC_ALIGN);
	txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
	txring->next_to_use = 0;
	atomic_set(&txring->next_to_clean, 0);
	atomic_set(&txring->nr_free, jme->tx_ring_size);

	txring->bufinf = kmalloc(sizeof(struct jme_buffer_info) *
				jme->tx_ring_size, GFP_ATOMIC);
	if (unlikely(!(txring->bufinf)))
		goto err_free_txring;

	/*
	 * Initialize Transmit Descriptors
	 */
	memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
	memset(txring->bufinf, 0,
		sizeof(struct jme_buffer_info) * jme->tx_ring_size);

	return 0;

err_free_txring:
	dma_free_coherent(&(jme->pdev->dev),
			TX_RING_ALLOC_SIZE(jme->tx_ring_size),
			txring->alloc,
			txring->dmaalloc);

err_set_null:
	txring->desc = NULL;
	txring->dmaalloc = 0;
	txring->dma = 0;
	txring->bufinf = NULL;

	return -ENOMEM;
}
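
/*
 * A sketch of the alignment math, using hypothetical addresses: the
 * ring must start on a RING_DESC_ALIGN (16-byte) boundary, so
 * TX_RING_ALLOC_SIZE() reserves extra slack and the CPU and bus
 * addresses are rounded up in lockstep, e.g.
 *   alloc    = 0x...1008  ->  desc = 0x...1010
 *   dmaalloc = 0x...B008  ->  dma  = 0x...B010
 * keeping desc and dma pointed at the same physical descriptor.
 */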

static void
jme_free_tx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *txring = &(jme->txring[0]);
	struct jme_buffer_info *txbi;

	if (txring->alloc) {
		if (txring->bufinf) {
			for (i = 0 ; i < jme->tx_ring_size ; ++i) {
				txbi = txring->bufinf + i;
				if (txbi->skb) {
					dev_kfree_skb(txbi->skb);
					txbi->skb = NULL;
				}
				txbi->mapping = 0;
				txbi->len = 0;
				txbi->nr_desc = 0;
				txbi->start_xmit = 0;
			}
			kfree(txring->bufinf);
		}

		dma_free_coherent(&(jme->pdev->dev),
				  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
				  txring->alloc,
				  txring->dmaalloc);

		txring->alloc = NULL;
		txring->desc = NULL;
		txring->dmaalloc = 0;
		txring->dma = 0;
		txring->bufinf = NULL;
	}
	txring->next_to_use = 0;
	atomic_set(&txring->next_to_clean, 0);
	atomic_set(&txring->nr_free, 0);
}

static inline void
jme_enable_tx_engine(struct jme_adapter *jme)
{
	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);
	wmb();

	/*
	 * Setup TX Queue 0 DMA Base Address
	 */
	jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
	jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
	jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);

	/*
	 * Setup TX Descriptor Count
	 */
	jwrite32(jme, JME_TXQDC, jme->tx_ring_size);

	/*
	 * Enable TX Engine
	 */
	wmb();
	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);

}

static inline void
jme_restart_tx_engine(struct jme_adapter *jme)
{
	/*
	 * Restart TX Engine
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);
}

static inline void
jme_disable_tx_engine(struct jme_adapter *jme)
{
	int i;
	u32 val;

	/*
	 * Disable TX Engine
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);
	wmb();

	val = jread32(jme, JME_TXCS);
	for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);
		val = jread32(jme, JME_TXCS);
		rmb();
	}

	if (!i)
		jeprintk(jme->pdev, "Disable TX engine timeout.\n");
}

static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	register struct rxdesc *rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;
	rxdesc += i;
	rxbi += i;

	rxdesc->dw[0] = 0;
	rxdesc->dw[1] = 0;
	rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32);
	rxdesc->desc1.bufaddrl = cpu_to_le32(
				(__u64)rxbi->mapping & 0xFFFFFFFFUL);
	rxdesc->desc1.datalen = cpu_to_le16(rxbi->len);
	if (jme->dev->features & NETIF_F_HIGHDMA)
		rxdesc->desc1.flags = RXFLAG_64BIT;
	wmb();
	rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT;
}
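
/*
 * Ordering matters here: the descriptor is fully initialized before
 * the wmb(), and only then is RXFLAG_OWN set to hand the buffer to
 * the NIC. If OWN became visible before the buffer address and
 * length, the hardware could DMA into a half-initialized descriptor.
 */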

static int
jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf + i;
	struct sk_buff *skb;

	skb = netdev_alloc_skb(jme->dev,
		jme->dev->mtu + RX_EXTRA_LEN);
	if (unlikely(!skb))
		return -ENOMEM;

	rxbi->skb = skb;
	rxbi->len = skb_tailroom(skb);
	rxbi->mapping = pci_map_page(jme->pdev,
				     virt_to_page(skb->data),
				     offset_in_page(skb->data),
				     rxbi->len,
				     PCI_DMA_FROMDEVICE);

	return 0;
}

static void
jme_free_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf;
	rxbi += i;

	if (rxbi->skb) {
		pci_unmap_page(jme->pdev,
				rxbi->mapping,
				rxbi->len,
				PCI_DMA_FROMDEVICE);
		dev_kfree_skb(rxbi->skb);
		rxbi->skb = NULL;
		rxbi->mapping = 0;
		rxbi->len = 0;
	}
}

static void
jme_free_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	if (rxring->alloc) {
		if (rxring->bufinf) {
			for (i = 0 ; i < jme->rx_ring_size ; ++i)
				jme_free_rx_buf(jme, i);
			kfree(rxring->bufinf);
		}

		dma_free_coherent(&(jme->pdev->dev),
				  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
				  rxring->alloc,
				  rxring->dmaalloc);
		rxring->alloc = NULL;
		rxring->desc = NULL;
		rxring->dmaalloc = 0;
		rxring->dma = 0;
		rxring->bufinf = NULL;
	}
	rxring->next_to_use = 0;
	atomic_set(&rxring->next_to_clean, 0);
}

static int
jme_setup_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
				   RX_RING_ALLOC_SIZE(jme->rx_ring_size),
				   &(rxring->dmaalloc),
				   GFP_ATOMIC);
	if (!rxring->alloc)
		goto err_set_null;

	/*
	 * 16 Bytes align
	 */
	rxring->desc = (void *)ALIGN((unsigned long)(rxring->alloc),
				     RING_DESC_ALIGN);
	rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
	rxring->next_to_use = 0;
	atomic_set(&rxring->next_to_clean, 0);

	rxring->bufinf = kmalloc(sizeof(struct jme_buffer_info) *
				jme->rx_ring_size, GFP_ATOMIC);
	if (unlikely(!(rxring->bufinf)))
		goto err_free_rxring;

	/*
	 * Initialize Receive Descriptors
	 */
	memset(rxring->bufinf, 0,
		sizeof(struct jme_buffer_info) * jme->rx_ring_size);
	for (i = 0 ; i < jme->rx_ring_size ; ++i) {
		if (unlikely(jme_make_new_rx_buf(jme, i))) {
			jme_free_rx_resources(jme);
			return -ENOMEM;
		}

		jme_set_clean_rxdesc(jme, i);
	}

	return 0;

err_free_rxring:
	dma_free_coherent(&(jme->pdev->dev),
			RX_RING_ALLOC_SIZE(jme->rx_ring_size),
			rxring->alloc,
			rxring->dmaalloc);
err_set_null:
	rxring->desc = NULL;
	rxring->dmaalloc = 0;
	rxring->dma = 0;
	rxring->bufinf = NULL;

	return -ENOMEM;
}

static inline void
jme_enable_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0);
	wmb();

	/*
	 * Setup RX DMA Base Address
	 */
	jwrite32(jme, JME_RXDBA_LO, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
	jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
	jwrite32(jme, JME_RXNDA, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);

	/*
	 * Setup RX Descriptor Count
	 */
	jwrite32(jme, JME_RXQDC, jme->rx_ring_size);

	/*
	 * Setup Unicast Filter
	 */
	jme_set_multi(jme->dev);

	/*
	 * Enable RX Engine
	 */
	wmb();
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}

static inline void
jme_restart_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Start RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}

static inline void
jme_disable_rx_engine(struct jme_adapter *jme)
{
	int i;
	u32 val;

	/*
	 * Disable RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs);
	wmb();

	val = jread32(jme, JME_RXCS);
	for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);
		val = jread32(jme, JME_RXCS);
		rmb();
	}

	if (!i)
		jeprintk(jme->pdev, "Disable RX engine timeout.\n");

}

static int
jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
{
	if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
		return false;

	if (unlikely(!(flags & RXWBFLAG_MF) &&
	    (flags & RXWBFLAG_TCPON) && !(flags & RXWBFLAG_TCPCS))) {
		msg_rx_err(jme, "TCP Checksum error.\n");
		goto out_sumerr;
	}

	if (unlikely(!(flags & RXWBFLAG_MF) &&
	    (flags & RXWBFLAG_UDPON) && !(flags & RXWBFLAG_UDPCS))) {
		msg_rx_err(jme, "UDP Checksum error.\n");
		goto out_sumerr;
	}

	if (unlikely((flags & RXWBFLAG_IPV4) && !(flags & RXWBFLAG_IPCS))) {
		msg_rx_err(jme, "IPv4 Checksum error.\n");
		goto out_sumerr;
	}

	return true;

out_sumerr:
	return false;
}
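
/*
 * Reading the write-back flags: RXWBFLAG_*ON means the NIC recognized
 * that protocol in the frame, and the matching *CS bit reports a good
 * checksum. The RXWBFLAG_MF (more-fragments) guard skips the TCP/UDP
 * checks for IP fragments, whose L4 checksum spans several frames and
 * cannot be validated per packet by the hardware.
 */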

static void
jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct rxdesc *rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;
	struct sk_buff *skb;
	int framesize;

	rxdesc += idx;
	rxbi += idx;

	skb = rxbi->skb;
	pci_dma_sync_single_for_cpu(jme->pdev,
					rxbi->mapping,
					rxbi->len,
					PCI_DMA_FROMDEVICE);

	if (unlikely(jme_make_new_rx_buf(jme, idx))) {
		pci_dma_sync_single_for_device(jme->pdev,
						rxbi->mapping,
						rxbi->len,
						PCI_DMA_FROMDEVICE);

		++(NET_STAT(jme).rx_dropped);
	} else {
		framesize = le16_to_cpu(rxdesc->descwb.framesize)
				- RX_PREPAD_SIZE;

		skb_reserve(skb, RX_PREPAD_SIZE);
		skb_put(skb, framesize);
		skb->protocol = eth_type_trans(skb, jme->dev);

		if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
			if (jme->vlgrp) {
				jme->jme_vlan_rx(skb, jme->vlgrp,
					le16_to_cpu(rxdesc->descwb.vlan));
				NET_STAT(jme).rx_bytes += 4;
			}
		} else {
			jme->jme_rx(skb);
		}

		if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) ==
		    cpu_to_le16(RXWBFLAG_DEST_MUL))
			++(NET_STAT(jme).multicast);

		NET_STAT(jme).rx_bytes += framesize;
		++(NET_STAT(jme).rx_packets);
	}

	jme_set_clean_rxdesc(jme, idx);

}

static int
jme_process_receive(struct jme_adapter *jme, int limit)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct rxdesc *rxdesc = rxring->desc;
	int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;

	if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
		goto out_inc;

	if (unlikely(atomic_read(&jme->link_changing) != 1))
		goto out_inc;

	if (unlikely(!netif_carrier_ok(jme->dev)))
		goto out_inc;

	i = atomic_read(&rxring->next_to_clean);
	while (limit > 0) {
		rxdesc = rxring->desc;
		rxdesc += i;

		if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) ||
		    !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
			goto out;
		--limit;

		desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;

		if (unlikely(desccnt > 1 ||
		    rxdesc->descwb.errstat & RXWBERR_ALLERR)) {

			if (rxdesc->descwb.errstat & RXWBERR_CRCERR)
				++(NET_STAT(jme).rx_crc_errors);
			else if (rxdesc->descwb.errstat & RXWBERR_OVERUN)
				++(NET_STAT(jme).rx_fifo_errors);
			else
				++(NET_STAT(jme).rx_errors);

			if (desccnt > 1)
				limit -= desccnt - 1;

			for (j = i, ccnt = desccnt ; ccnt-- ; ) {
				jme_set_clean_rxdesc(jme, j);
				j = (j + 1) & (mask);
			}

		} else {
			jme_alloc_and_feed_skb(jme, i);
		}

		i = (i + desccnt) & (mask);
	}

out:
	atomic_set(&rxring->next_to_clean, i);

out_inc:
	atomic_inc(&jme->rx_cleaning);

	return limit > 0 ? limit : 0;

}
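
/*
 * Ring arithmetic: rx_ring_mask is ring size minus one (sizes are
 * powers of two), so "(i + desccnt) & mask" is a cheap modulo that
 * wraps the index. For a hypothetical 256-entry ring, mask is 0xFF
 * and an index of 254 advancing by 3 descriptors lands on
 * (254 + 3) & 0xFF = 1.
 */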

static void
jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
{
	if (likely(atmp == dpi->cur)) {
		dpi->cnt = 0;
		return;
	}

	if (dpi->attempt == atmp) {
		++(dpi->cnt);
	} else {
		dpi->attempt = atmp;
		dpi->cnt = 0;
	}

}

static void
jme_dynamic_pcc(struct jme_adapter *jme)
{
	register struct dynpcc_info *dpi = &(jme->dpi);

	if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
		jme_attempt_pcc(dpi, PCC_P3);
	else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD
	    || dpi->intr_cnt > PCC_INTR_THRESHOLD)
		jme_attempt_pcc(dpi, PCC_P2);
	else
		jme_attempt_pcc(dpi, PCC_P1);

	if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
		if (dpi->attempt < dpi->cur)
			tasklet_schedule(&jme->rxclean_task);
		jme_set_rx_pcc(jme, dpi->attempt);
		dpi->cur = dpi->attempt;
		dpi->cnt = 0;
	}
}
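
/*
 * Note the hysteresis: a new coalescing level must win the per-period
 * vote more than five times in a row (dpi->cnt > 5) before it is
 * applied, which keeps bursty traffic from flapping between PCC
 * levels. When stepping down to a lower-latency level, pending
 * packets are flushed first via the rxclean tasklet.
 */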

static void
jme_start_pcc_timer(struct jme_adapter *jme)
{
	struct dynpcc_info *dpi = &(jme->dpi);
	dpi->last_bytes = NET_STAT(jme).rx_bytes;
	dpi->last_pkts = NET_STAT(jme).rx_packets;
	dpi->intr_cnt = 0;
	jwrite32(jme, JME_TMCSR,
		TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
}

static inline void
jme_stop_pcc_timer(struct jme_adapter *jme)
{
	jwrite32(jme, JME_TMCSR, 0);
}

static void
jme_shutdown_nic(struct jme_adapter *jme)
{
	u32 phylink;

	phylink = jme_linkstat_from_phy(jme);

	if (!(phylink & PHY_LINK_UP)) {
		/*
		 * Disable all interrupts before issuing the shutdown timer
		 */
		jme_stop_irq(jme);
		jwrite32(jme, JME_TIMER2, TMCSR_EN | 0xFFFFFE);
	}
}

static void
jme_pcc_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct net_device *netdev = jme->dev;

	if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
		jme_shutdown_nic(jme);
		return;
	}

	if (unlikely(!netif_carrier_ok(netdev) ||
		(atomic_read(&jme->link_changing) != 1)
	)) {
		jme_stop_pcc_timer(jme);
		return;
	}

	if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
		jme_dynamic_pcc(jme);

	jme_start_pcc_timer(jme);
}

static inline void
jme_polling_mode(struct jme_adapter *jme)
{
	jme_set_rx_pcc(jme, PCC_OFF);
}

static inline void
jme_interrupt_mode(struct jme_adapter *jme)
{
	jme_set_rx_pcc(jme, PCC_P1);
}

static inline int
jme_pseudo_hotplug_enabled(struct jme_adapter *jme)
{
	u32 apmc;
	apmc = jread32(jme, JME_APMC);
	return apmc & JME_APMC_PSEUDO_HP_EN;
}

static void
jme_start_shutdown_timer(struct jme_adapter *jme)
{
	u32 apmc;

	apmc = jread32(jme, JME_APMC) | JME_APMC_PCIE_SD_EN;
	apmc &= ~JME_APMC_EPIEN_CTRL;
	if (!no_extplug) {
		jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_EN);
		wmb();
	}
	jwrite32f(jme, JME_APMC, apmc);

	jwrite32f(jme, JME_TIMER2, 0);
	set_bit(JME_FLAG_SHUTDOWN, &jme->flags);
	jwrite32(jme, JME_TMCSR,
		TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT));
}

static void
jme_stop_shutdown_timer(struct jme_adapter *jme)
{
	u32 apmc;

	jwrite32f(jme, JME_TMCSR, 0);
	jwrite32f(jme, JME_TIMER2, 0);
	clear_bit(JME_FLAG_SHUTDOWN, &jme->flags);

	apmc = jread32(jme, JME_APMC);
	apmc &= ~(JME_APMC_PCIE_SD_EN | JME_APMC_EPIEN_CTRL);
	jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_DIS);
	wmb();
	jwrite32f(jme, JME_APMC, apmc);
}

static void
jme_link_change_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct net_device *netdev = jme->dev;
	int rc;

	while (!atomic_dec_and_test(&jme->link_changing)) {
		atomic_inc(&jme->link_changing);
		msg_intr(jme, "Failed to get link change lock.\n");
		while (atomic_read(&jme->link_changing) != 1)
			msg_intr(jme, "Waiting for link change lock.\n");
	}

	if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
		goto out;

	jme->old_mtu = netdev->mtu;
	netif_stop_queue(netdev);
	if (jme_pseudo_hotplug_enabled(jme))
		jme_stop_shutdown_timer(jme);

	jme_stop_pcc_timer(jme);
	tasklet_disable(&jme->txclean_task);
	tasklet_disable(&jme->rxclean_task);
	tasklet_disable(&jme->rxempty_task);

	if (netif_carrier_ok(netdev)) {
		jme_reset_ghc_speed(jme);
		jme_disable_rx_engine(jme);
		jme_disable_tx_engine(jme);
		jme_reset_mac_processor(jme);
		jme_free_rx_resources(jme);
		jme_free_tx_resources(jme);

		if (test_bit(JME_FLAG_POLL, &jme->flags))
			jme_polling_mode(jme);

		netif_carrier_off(netdev);
	}

	jme_check_link(netdev, 0);
	if (netif_carrier_ok(netdev)) {
		rc = jme_setup_rx_resources(jme);
		if (rc) {
			jeprintk(jme->pdev,
				"Failed to allocate RX resources, device stopped!\n");
			goto out_enable_tasklet;
		}

		rc = jme_setup_tx_resources(jme);
		if (rc) {
			jeprintk(jme->pdev,
				"Failed to allocate TX resources, device stopped!\n");
			goto err_out_free_rx_resources;
		}

		jme_enable_rx_engine(jme);
		jme_enable_tx_engine(jme);

		netif_start_queue(netdev);

		if (test_bit(JME_FLAG_POLL, &jme->flags))
			jme_interrupt_mode(jme);

		jme_start_pcc_timer(jme);
	} else if (jme_pseudo_hotplug_enabled(jme)) {
		jme_start_shutdown_timer(jme);
	}

	goto out_enable_tasklet;

err_out_free_rx_resources:
	jme_free_rx_resources(jme);
out_enable_tasklet:
	tasklet_enable(&jme->txclean_task);
	tasklet_hi_enable(&jme->rxclean_task);
	tasklet_hi_enable(&jme->rxempty_task);
out:
	atomic_inc(&jme->link_changing);
}

static void
jme_rx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct dynpcc_info *dpi = &(jme->dpi);

	jme_process_receive(jme, jme->rx_ring_size);
	++(dpi->intr_cnt);

}

static int
jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
{
	struct jme_adapter *jme = jme_napi_priv(holder);
	int rest;

	rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget));

	while (atomic_read(&jme->rx_empty) > 0) {
		atomic_dec(&jme->rx_empty);
		++(NET_STAT(jme).rx_dropped);
		jme_restart_rx_engine(jme);
	}
	atomic_inc(&jme->rx_empty);

	if (rest) {
		JME_RX_COMPLETE(netdev, holder);
		jme_interrupt_mode(jme);
	}

	JME_NAPI_WEIGHT_SET(budget, rest);
	return JME_NAPI_WEIGHT_VAL(budget) - rest;
}
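
/*
 * NAPI contract: jme_poll() must return the number of packets
 * consumed. jme_process_receive() returns the unused budget ("rest"),
 * so budget minus rest is the work done; only when the budget was not
 * exhausted (rest != 0) does the driver complete NAPI and fall back
 * to interrupt-driven PCC_P1 coalescing.
 */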

static void
jme_rx_empty_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;

	if (unlikely(atomic_read(&jme->link_changing) != 1))
		return;

	if (unlikely(!netif_carrier_ok(jme->dev)))
		return;

	msg_rx_status(jme, "RX Queue Full!\n");

	jme_rx_clean_tasklet(arg);

	while (atomic_read(&jme->rx_empty) > 0) {
		atomic_dec(&jme->rx_empty);
		++(NET_STAT(jme).rx_dropped);
		jme_restart_rx_engine(jme);
	}
	atomic_inc(&jme->rx_empty);
}

static void
jme_wake_queue_if_stopped(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);

	smp_wmb();
	if (unlikely(netif_queue_stopped(jme->dev) &&
	    atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
		msg_tx_done(jme, "TX Queue Woken.\n");
		netif_wake_queue(jme->dev);
	}

}

static void
jme_tx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct jme_ring *txring = &(jme->txring[0]);
	struct txdesc *txdesc = txring->desc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
	int i, j, cnt = 0, max, err, mask;

	tx_dbg(jme, "Into txclean.\n");

	if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
		goto out;

	if (unlikely(atomic_read(&jme->link_changing) != 1))
		goto out;

	if (unlikely(!netif_carrier_ok(jme->dev)))
		goto out;

	max = jme->tx_ring_size - atomic_read(&txring->nr_free);
	mask = jme->tx_ring_mask;

	for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) {

		ctxbi = txbi + i;

		if (likely(ctxbi->skb &&
		    !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {

			tx_dbg(jme, "txclean: %d+%d@%lu\n",
					i, ctxbi->nr_desc, jiffies);

			err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;

			for (j = 1 ; j < ctxbi->nr_desc ; ++j) {
				ttxbi = txbi + ((i + j) & (mask));
				txdesc[(i + j) & (mask)].dw[0] = 0;

				pci_unmap_page(jme->pdev,
						ttxbi->mapping,
						ttxbi->len,
						PCI_DMA_TODEVICE);

				ttxbi->mapping = 0;
				ttxbi->len = 0;
			}

			dev_kfree_skb(ctxbi->skb);

			cnt += ctxbi->nr_desc;

			if (unlikely(err)) {
				++(NET_STAT(jme).tx_carrier_errors);
			} else {
				++(NET_STAT(jme).tx_packets);
				NET_STAT(jme).tx_bytes += ctxbi->len;
			}

			ctxbi->skb = NULL;
			ctxbi->len = 0;
			ctxbi->start_xmit = 0;

		} else {
			break;
		}

		i = (i + ctxbi->nr_desc) & mask;

		ctxbi->nr_desc = 0;
	}

	tx_dbg(jme, "txclean: done %d@%lu.\n", i, jiffies);
	atomic_set(&txring->next_to_clean, i);
	atomic_add(cnt, &txring->nr_free);

	jme_wake_queue_if_stopped(jme);

out:
	atomic_inc(&jme->tx_cleaning);
}

static void
jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
{
	/*
	 * Disable interrupt
	 */
	jwrite32f(jme, JME_IENC, INTR_ENABLE);

	if (intrstat & (INTR_LINKCH | INTR_SWINTR)) {
		/*
		 * Link change event is critical;
		 * all other events are ignored
		 */
		jwrite32(jme, JME_IEVE, intrstat);
		tasklet_schedule(&jme->linkch_task);
		goto out_reenable;
	}

	if (intrstat & INTR_TMINTR) {
		jwrite32(jme, JME_IEVE, INTR_TMINTR);
		tasklet_schedule(&jme->pcc_task);
	}

	if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) {
		jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
		tasklet_schedule(&jme->txclean_task);
	}

	if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
		jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO |
						     INTR_PCCRX0 |
						     INTR_RX0EMP)) |
					INTR_RX0);
	}

	if (test_bit(JME_FLAG_POLL, &jme->flags)) {
		if (intrstat & INTR_RX0EMP)
			atomic_inc(&jme->rx_empty);

		if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
			if (likely(JME_RX_SCHEDULE_PREP(jme))) {
				jme_polling_mode(jme);
				JME_RX_SCHEDULE(jme);
			}
		}
	} else {
		if (intrstat & INTR_RX0EMP) {
			atomic_inc(&jme->rx_empty);
			tasklet_hi_schedule(&jme->rxempty_task);
		} else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
			tasklet_hi_schedule(&jme->rxclean_task);
		}
	}

out_reenable:
	/*
	 * Re-enable interrupt
	 */
	jwrite32f(jme, JME_IENS, INTR_ENABLE);
}

static irqreturn_t
jme_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 intrstat;

	intrstat = jread32(jme, JME_IEVE);

	/*
	 * Check if it's really an interrupt for us
	 */
	if (unlikely((intrstat & INTR_ENABLE) == 0))
		return IRQ_NONE;

	/*
	 * Check if the device still exists
	 */
	if (unlikely(intrstat == ~((typeof(intrstat))0)))
		return IRQ_NONE;

	jme_intr_msi(jme, intrstat);

	return IRQ_HANDLED;
}

static irqreturn_t
jme_msi(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 intrstat;

	pci_dma_sync_single_for_cpu(jme->pdev,
				    jme->shadow_dma,
				    sizeof(u32) * SHADOW_REG_NR,
				    PCI_DMA_FROMDEVICE);
	intrstat = jme->shadow_regs[SHADOW_IEVE];
	jme->shadow_regs[SHADOW_IEVE] = 0;

	jme_intr_msi(jme, intrstat);

	return IRQ_HANDLED;
}
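
/*
 * The MSI path reads interrupt status from the shadow area instead of
 * JME_IEVE: jme_enable_shadow() asked the NIC to post its status
 * registers into host memory (jme->shadow_regs) by DMA, sparing the
 * handler a comparatively slow MMIO read. The sync-for-CPU call is
 * still needed before that memory copy can be trusted.
 */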

static void
jme_reset_link(struct jme_adapter *jme)
{
	jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
}

static void
jme_restart_an(struct jme_adapter *jme)
{
	u32 bmcr;

	spin_lock_bh(&jme->phy_lock);
	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
	spin_unlock_bh(&jme->phy_lock);
}

static int
jme_request_irq(struct jme_adapter *jme)
{
	int rc;
	struct net_device *netdev = jme->dev;
	irq_handler_t handler = jme_intr;
	int irq_flags = IRQF_SHARED;

	if (!pci_enable_msi(jme->pdev)) {
		set_bit(JME_FLAG_MSI, &jme->flags);
		handler = jme_msi;
		irq_flags = 0;
	}

	rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
			  netdev);
	if (rc) {
		jeprintk(jme->pdev,
			"Unable to request %s interrupt (return: %d)\n",
			test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx",
			rc);

		if (test_bit(JME_FLAG_MSI, &jme->flags)) {
			pci_disable_msi(jme->pdev);
			clear_bit(JME_FLAG_MSI, &jme->flags);
		}
	} else {
		netdev->irq = jme->pdev->irq;
	}

	return rc;
}

static void
jme_free_irq(struct jme_adapter *jme)
{
	free_irq(jme->pdev->irq, jme->dev);
	if (test_bit(JME_FLAG_MSI, &jme->flags)) {
		pci_disable_msi(jme->pdev);
		clear_bit(JME_FLAG_MSI, &jme->flags);
		jme->dev->irq = jme->pdev->irq;
	}
}

static int
jme_open(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;

	jme_clear_pm(jme);
	JME_NAPI_ENABLE(jme);

	tasklet_enable(&jme->linkch_task);
	tasklet_enable(&jme->txclean_task);
	tasklet_hi_enable(&jme->rxclean_task);
	tasklet_hi_enable(&jme->rxempty_task);

	rc = jme_request_irq(jme);
	if (rc)
		goto err_out;

	jme_enable_shadow(jme);
	jme_start_irq(jme);

	if (test_bit(JME_FLAG_SSET, &jme->flags))
		jme_set_settings(netdev, &jme->old_ecmd);
	else
		jme_reset_phy_processor(jme);

	jme_reset_link(jme);

	return 0;

err_out:
	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	return rc;
}

#ifdef CONFIG_PM
static void
jme_set_100m_half(struct jme_adapter *jme)
{
	u32 bmcr, tmp;

	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
		       BMCR_SPEED1000 | BMCR_FULLDPLX);
	tmp |= BMCR_SPEED100;

	if (bmcr != tmp)
		jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);

	if (jme->fpgaver)
		jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL);
	else
		jwrite32(jme, JME_GHC, GHC_SPEED_100M);
}

#define JME_WAIT_LINK_TIME 2000 /* 2000ms */
static void
jme_wait_link(struct jme_adapter *jme)
{
	u32 phylink, to = JME_WAIT_LINK_TIME;

	mdelay(1000);
	phylink = jme_linkstat_from_phy(jme);
	while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) {
		mdelay(10);
		phylink = jme_linkstat_from_phy(jme);
	}
}
#endif

static inline void
jme_phy_off(struct jme_adapter *jme)
{
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
}

static int
jme_close(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);

	jme_stop_irq(jme);
	jme_disable_shadow(jme);
	jme_free_irq(jme);

	JME_NAPI_DISABLE(jme);

	tasklet_disable(&jme->linkch_task);
	tasklet_disable(&jme->txclean_task);
	tasklet_disable(&jme->rxclean_task);
	tasklet_disable(&jme->rxempty_task);

	jme_reset_ghc_speed(jme);
	jme_disable_rx_engine(jme);
	jme_disable_tx_engine(jme);
	jme_reset_mac_processor(jme);
	jme_free_rx_resources(jme);
	jme_free_tx_resources(jme);
	jme->phylink = 0;
	jme_phy_off(jme);

	return 0;
}

static int
jme_alloc_txdesc(struct jme_adapter *jme,
			struct sk_buff *skb)
{
	struct jme_ring *txring = &(jme->txring[0]);
	int idx, nr_alloc, mask = jme->tx_ring_mask;

	idx = txring->next_to_use;
	nr_alloc = skb_shinfo(skb)->nr_frags + 2;

	if (unlikely(atomic_read(&txring->nr_free) < nr_alloc))
		return -1;

	atomic_sub(nr_alloc, &txring->nr_free);

	txring->next_to_use = (txring->next_to_use + nr_alloc) & mask;

	return idx;
}
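
/*
 * Descriptor budgeting: each skb consumes nr_frags + 2 ring entries,
 * one command/header descriptor (filled by jme_fill_tx_desc), one for
 * the linear part of the skb, and one per paged fragment. Allocation
 * only advances next_to_use; the entries are populated afterwards.
 */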

static void
jme_fill_tx_map(struct pci_dev *pdev,
		struct txdesc *txdesc,
		struct jme_buffer_info *txbi,
		struct page *page,
		u32 page_offset,
		u32 len,
		u8 hidma)
{
	dma_addr_t dmaaddr;

	dmaaddr = pci_map_page(pdev,
				page,
				page_offset,
				len,
				PCI_DMA_TODEVICE);

	pci_dma_sync_single_for_device(pdev,
				       dmaaddr,
				       len,
				       PCI_DMA_TODEVICE);

	txdesc->dw[0] = 0;
	txdesc->dw[1] = 0;
	txdesc->desc2.flags = TXFLAG_OWN;
	txdesc->desc2.flags |= (hidma) ? TXFLAG_64BIT : 0;
	txdesc->desc2.datalen = cpu_to_le16(len);
	txdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32);
	txdesc->desc2.bufaddrl = cpu_to_le32(
				(__u64)dmaaddr & 0xFFFFFFFFUL);

	txbi->mapping = dmaaddr;
	txbi->len = len;
}

static void
jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
{
	struct jme_ring *txring = &(jme->txring[0]);
	struct txdesc *txdesc = txring->desc, *ctxdesc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
	u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
	int i, nr_frags = skb_shinfo(skb)->nr_frags;
	int mask = jme->tx_ring_mask;
	struct skb_frag_struct *frag;
	u32 len;

	for (i = 0 ; i < nr_frags ; ++i) {
		frag = &skb_shinfo(skb)->frags[i];
		ctxdesc = txdesc + ((idx + i + 2) & (mask));
		ctxbi = txbi + ((idx + i + 2) & (mask));

		jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, frag->page,
				frag->page_offset, frag->size, hidma);
	}

	len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
	ctxdesc = txdesc + ((idx + 1) & (mask));
	ctxbi = txbi + ((idx + 1) & (mask));
	jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
			offset_in_page(skb->data), len, hidma);

}

static int
jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
{
	if (unlikely(skb_shinfo(skb)->gso_size &&
			skb_header_cloned(skb) &&
			pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		return -1;
	}

	return 0;
}

static int
jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
{
	*mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT);
	if (*mss) {
		*flags |= TXFLAG_LSEN;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);

			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								iph->daddr, 0,
								IPPROTO_TCP,
								0);
		} else {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
							       &ip6h->daddr, 0,
							       IPPROTO_TCP,
							       0);
		}

		return 0;
	}

	return 1;
}
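
/*
 * TSO preparation: for large sends the hardware segments the TCP
 * stream itself, so the driver seeds tcp->check with only the
 * pseudo-header checksum (addresses, protocol, zero length) via
 * csum_tcpudp_magic()/csum_ipv6_magic(). The NIC then fills in the
 * per-segment length and final checksum for every emitted frame.
 */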

static void
jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 ip_proto;

		switch (skb->protocol) {
		case htons(ETH_P_IP):
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			ip_proto = 0;
			break;
		}

		switch (ip_proto) {
		case IPPROTO_TCP:
			*flags |= TXFLAG_TCPCS;
			break;
		case IPPROTO_UDP:
			*flags |= TXFLAG_UDPCS;
			break;
		default:
			msg_tx_err(jme, "Unsupported upper layer protocol.\n");
			break;
		}
	}
}

static inline void
jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
{
	if (vlan_tx_tag_present(skb)) {
		*flags |= TXFLAG_TAGON;
		*vlan = cpu_to_le16(vlan_tx_tag_get(skb));
	}
}

static int
jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
{
	struct jme_ring *txring = &(jme->txring[0]);
	struct txdesc *txdesc;
	struct jme_buffer_info *txbi;
	u8 flags;

	txdesc = (struct txdesc *)txring->desc + idx;
	txbi = txring->bufinf + idx;

	txdesc->dw[0] = 0;
	txdesc->dw[1] = 0;
	txdesc->dw[2] = 0;
	txdesc->dw[3] = 0;
	txdesc->desc1.pktsize = cpu_to_le16(skb->len);
	/*
	 * Set the OWN bit last. If the kernel queues packets faster
	 * than the NIC sends them, the NIC could otherwise try to send
	 * this descriptor before we tell it to start sending this TX
	 * queue. All other fields are already filled correctly.
	 */
	wmb();
	flags = TXFLAG_OWN | TXFLAG_INT;
	/*
	 * Set checksum flags only when not doing TSO
	 */
	if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
		jme_tx_csum(jme, skb, &flags);
	jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
	jme_map_tx_skb(jme, skb, idx);
	txdesc->desc1.flags = flags;
	/*
	 * Set tx buffer info after telling the NIC to send,
	 * for better tx_clean timing
	 */
	wmb();
	txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
	txbi->skb = skb;
	txbi->len = skb->len;
	txbi->start_xmit = jiffies;
	if (!txbi->start_xmit)
		txbi->start_xmit = (0UL-1);

	return 0;
}

static void
jme_stop_queue_if_full(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);
	struct jme_buffer_info *txbi = txring->bufinf;
	int idx = atomic_read(&txring->next_to_clean);

	txbi += idx;

	smp_wmb();
	if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
		netif_stop_queue(jme->dev);
		msg_tx_queued(jme, "TX Queue Paused.\n");
		smp_wmb();
		if (atomic_read(&txring->nr_free)
			>= (jme->tx_wake_threshold)) {
			netif_wake_queue(jme->dev);
			msg_tx_queued(jme, "TX Queue Fast Woken.\n");
		}
	}

	if (unlikely(txbi->start_xmit &&
			(jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
			txbi->skb)) {
		netif_stop_queue(jme->dev);
		msg_tx_queued(jme, "TX Queue Stopped %d@%lu.\n", idx, jiffies);
	}
}

/*
 * This function is already protected by netif_tx_lock()
 */

static int
jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int idx;

	if (unlikely(jme_expand_header(jme, skb))) {
		++(NET_STAT(jme).tx_dropped);
		return NETDEV_TX_OK;
	}

	idx = jme_alloc_txdesc(jme, skb);

	if (unlikely(idx < 0)) {
		netif_stop_queue(netdev);
		msg_tx_err(jme, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}

	jme_fill_tx_desc(jme, skb, idx);

	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_QUEUE0S |
				TXCS_ENABLE);

	tx_dbg(jme, "xmit: %d+%d@%lu\n", idx,
			skb_shinfo(skb)->nr_frags + 2,
			jiffies);
	jme_stop_queue_if_full(jme);

	return NETDEV_TX_OK;
}

static int
jme_set_macaddr(struct net_device *netdev, void *p)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	struct sockaddr *addr = p;
	u32 val;

	if (netif_running(netdev))
		return -EBUSY;

	spin_lock_bh(&jme->macaddr_lock);
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	val = (addr->sa_data[3] & 0xff) << 24 |
	      (addr->sa_data[2] & 0xff) << 16 |
	      (addr->sa_data[1] & 0xff) <<  8 |
	      (addr->sa_data[0] & 0xff);
	jwrite32(jme, JME_RXUMA_LO, val);
	val = (addr->sa_data[5] & 0xff) << 8 |
	      (addr->sa_data[4] & 0xff);
	jwrite32(jme, JME_RXUMA_HI, val);
	spin_unlock_bh(&jme->macaddr_lock);

	return 0;
}

static void
jme_set_multi(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 mc_hash[2] = {};
	int i;

	spin_lock_bh(&jme->rxmcs_lock);

	jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;

	if (netdev->flags & IFF_PROMISC) {
		jme->reg_rxmcs |= RXMCS_ALLFRAME;
	} else if (netdev->flags & IFF_ALLMULTI) {
		jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
	} else if (netdev->flags & IFF_MULTICAST) {
		struct dev_mc_list *mclist;
		int bit_nr;

		jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
		for (i = 0, mclist = netdev->mc_list;
			mclist && i < netdev->mc_count;
			++i, mclist = mclist->next) {

			bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
			mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
		}

		jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
		jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
	}

	wmb();
	jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);

	spin_unlock_bh(&jme->rxmcs_lock);
}
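
/*
 * Hash filter mechanics: the low 6 bits of the Ethernet CRC of each
 * multicast address select one of 64 bucket bits; bit_nr >> 5 picks
 * the 32-bit half (RXMCHT_LO/HI) and bit_nr & 0x1F the bit within it.
 * The NIC accepts any multicast frame whose address hashes to a set
 * bucket, so the filter is approximate and the stack still performs
 * exact matching.
 */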

static int
jme_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	if (new_mtu == jme->old_mtu)
		return 0;

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu) < IPV6_MIN_MTU))
		return -EINVAL;

	if (new_mtu > 4000) {
		jme->reg_rxcs &= ~RXCS_FIFOTHNP;
		jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
		jme_restart_rx_engine(jme);
	} else {
		jme->reg_rxcs &= ~RXCS_FIFOTHNP;
		jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
		jme_restart_rx_engine(jme);
	}

	if (new_mtu > 1900) {
		netdev->features &= ~(NETIF_F_HW_CSUM |
				NETIF_F_TSO |
				NETIF_F_TSO6);
	} else {
		if (test_bit(JME_FLAG_TXCSUM, &jme->flags))
			netdev->features |= NETIF_F_HW_CSUM;
		if (test_bit(JME_FLAG_TSO, &jme->flags))
			netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
	}

	netdev->mtu = new_mtu;
	jme_reset_link(jme);

	return 0;
}
2092
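/*
 * TX watchdog: the hardware stopped making progress, so reset the
 * PHY, restore any user-forced settings, and force a link reset.
 */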
2093static void
2094jme_tx_timeout(struct net_device *netdev)
2095{
2096 struct jme_adapter *jme = netdev_priv(netdev);
2097
2098 jme->phylink = 0;
2099 jme_reset_phy_processor(jme);
2100 if (test_bit(JME_FLAG_SSET, &jme->flags))
2101 jme_set_settings(netdev, &jme->old_ecmd);
2102
2103 /*
2104	 * Force the link to reset again
2105 */
2106 jme_reset_link(jme);
2107}
2108
2109static void
2110jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2111{
2112 struct jme_adapter *jme = netdev_priv(netdev);
2113
2114 jme->vlgrp = grp;
2115}
2116
2117static void
2118jme_get_drvinfo(struct net_device *netdev,
2119 struct ethtool_drvinfo *info)
2120{
2121 struct jme_adapter *jme = netdev_priv(netdev);
2122
2123 strcpy(info->driver, DRV_NAME);
2124 strcpy(info->version, DRV_VERSION);
2125 strcpy(info->bus_info, pci_name(jme->pdev));
2126}
2127
2128static int
2129jme_get_regs_len(struct net_device *netdev)
2130{
2131 return JME_REG_LEN;
2132}
2133
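/*
 * Helpers for jme_get_regs(): copy a window of MMIO registers, or
 * the PHY registers read over MDIO, into the ethtool dump buffer.
 */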
2134static void
2135mmapio_memcpy(struct jme_adapter *jme, u32 *p, u32 reg, int len)
2136{
2137 int i;
2138
2139 for (i = 0 ; i < len ; i += 4)
2140 p[i >> 2] = jread32(jme, reg + i);
2141}
2142
2143static void
2144mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr)
2145{
2146 int i;
2147 u16 *p16 = (u16 *)p;
2148
2149 for (i = 0 ; i < reg_nr ; ++i)
2150 p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i);
2151}
2152
2153static void
2154jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
2155{
2156 struct jme_adapter *jme = netdev_priv(netdev);
2157 u32 *p32 = (u32 *)p;
2158
2159 memset(p, 0xFF, JME_REG_LEN);
2160
2161 regs->version = 1;
2162 mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);
2163
2164 p32 += 0x100 >> 2;
2165 mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);
2166
2167 p32 += 0x100 >> 2;
2168 mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);
2169
2170 p32 += 0x100 >> 2;
2171 mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);
2172
2173 p32 += 0x100 >> 2;
2174 mdio_memcpy(jme, p32, JME_PHY_REG_NR);
2175}
2176
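/*
 * Report coalescing parameters.  TX values are fixed; RX values
 * follow the current dynamic PCC level, or read as zero when the
 * driver is in NAPI polling mode.
 */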
2177static int
2178jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
2179{
2180 struct jme_adapter *jme = netdev_priv(netdev);
2181
2182 ecmd->tx_coalesce_usecs = PCC_TX_TO;
2183 ecmd->tx_max_coalesced_frames = PCC_TX_CNT;
2184
2185 if (test_bit(JME_FLAG_POLL, &jme->flags)) {
2186 ecmd->use_adaptive_rx_coalesce = false;
2187 ecmd->rx_coalesce_usecs = 0;
2188 ecmd->rx_max_coalesced_frames = 0;
2189 return 0;
2190 }
2191
2192 ecmd->use_adaptive_rx_coalesce = true;
2193
2194 switch (jme->dpi.cur) {
2195 case PCC_P1:
2196 ecmd->rx_coalesce_usecs = PCC_P1_TO;
2197 ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
2198 break;
2199 case PCC_P2:
2200 ecmd->rx_coalesce_usecs = PCC_P2_TO;
2201 ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
2202 break;
2203 case PCC_P3:
2204 ecmd->rx_coalesce_usecs = PCC_P3_TO;
2205 ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
2206 break;
2207 default:
2208 break;
2209 }
2210
2211 return 0;
2212}
2213
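/*
 * Switch between adaptive RX coalescing (dynamic PCC) and NAPI
 * polling; only permitted while the interface is down.  From
 * userspace this is typically reached via "ethtool -C ethX
 * adaptive-rx on|off".
 */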
2214static int
2215jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
2216{
2217 struct jme_adapter *jme = netdev_priv(netdev);
2218 struct dynpcc_info *dpi = &(jme->dpi);
2219
2220 if (netif_running(netdev))
2221 return -EBUSY;
2222
2223 if (ecmd->use_adaptive_rx_coalesce
2224 && test_bit(JME_FLAG_POLL, &jme->flags)) {
2225 clear_bit(JME_FLAG_POLL, &jme->flags);
2226 jme->jme_rx = netif_rx;
2227 jme->jme_vlan_rx = vlan_hwaccel_rx;
2228 dpi->cur = PCC_P1;
2229 dpi->attempt = PCC_P1;
2230 dpi->cnt = 0;
2231 jme_set_rx_pcc(jme, PCC_P1);
2232 jme_interrupt_mode(jme);
2233 } else if (!(ecmd->use_adaptive_rx_coalesce)
2234 && !(test_bit(JME_FLAG_POLL, &jme->flags))) {
2235 set_bit(JME_FLAG_POLL, &jme->flags);
2236 jme->jme_rx = netif_receive_skb;
2237 jme->jme_vlan_rx = vlan_hwaccel_receive_skb;
2238 jme_interrupt_mode(jme);
2239 }
2240
2241 return 0;
2242}
2243
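/*
 * Flow-control state lives in three places: TX pause in TXPFC,
 * RX pause in RXMCS, and pause autonegotiation in the PHY
 * advertisement register.
 */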
2244static void
2245jme_get_pauseparam(struct net_device *netdev,
2246 struct ethtool_pauseparam *ecmd)
2247{
2248 struct jme_adapter *jme = netdev_priv(netdev);
2249 u32 val;
2250
2251 ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
2252 ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;
2253
2254 spin_lock_bh(&jme->phy_lock);
2255 val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
2256 spin_unlock_bh(&jme->phy_lock);
2257
2258 ecmd->autoneg =
2259 (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
2260}
2261
2262static int
2263jme_set_pauseparam(struct net_device *netdev,
2264 struct ethtool_pauseparam *ecmd)
2265{
2266 struct jme_adapter *jme = netdev_priv(netdev);
2267 u32 val;
2268
2269 if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) ^
2270 (ecmd->tx_pause != 0)) {
2271
2272 if (ecmd->tx_pause)
2273 jme->reg_txpfc |= TXPFC_PF_EN;
2274 else
2275 jme->reg_txpfc &= ~TXPFC_PF_EN;
2276
2277 jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
2278 }
2279
2280 spin_lock_bh(&jme->rxmcs_lock);
2281 if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^
2282 (ecmd->rx_pause != 0)) {
2283
2284 if (ecmd->rx_pause)
2285 jme->reg_rxmcs |= RXMCS_FLOWCTRL;
2286 else
2287 jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;
2288
2289 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2290 }
2291 spin_unlock_bh(&jme->rxmcs_lock);
2292
2293 spin_lock_bh(&jme->phy_lock);
2294 val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
2295 if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) ^
2296 (ecmd->autoneg != 0)) {
2297
2298 if (ecmd->autoneg)
2299 val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2300 else
2301 val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2302
2303 jme_mdio_write(jme->dev, jme->mii_if.phy_id,
2304 MII_ADVERTISE, val);
2305 }
2306 spin_unlock_bh(&jme->phy_lock);
2307
2308 return 0;
2309}
2310
2311static void
2312jme_get_wol(struct net_device *netdev,
2313 struct ethtool_wolinfo *wol)
2314{
2315 struct jme_adapter *jme = netdev_priv(netdev);
2316
2317 wol->supported = WAKE_MAGIC | WAKE_PHY;
2318
2319 wol->wolopts = 0;
2320
2321 if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
2322 wol->wolopts |= WAKE_PHY;
2323
2324 if (jme->reg_pmcs & PMCS_MFEN)
2325 wol->wolopts |= WAKE_MAGIC;
2326
2327}
2328
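/*
 * Only link-change (WAKE_PHY) and magic-packet (WAKE_MAGIC) wake
 * events are supported, e.g. "ethtool -s ethX wol g" for magic
 * packets; any other request is rejected.
 */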
2329static int
2330jme_set_wol(struct net_device *netdev,
2331 struct ethtool_wolinfo *wol)
2332{
2333 struct jme_adapter *jme = netdev_priv(netdev);
2334
2335 if (wol->wolopts & (WAKE_MAGICSECURE |
2336 WAKE_UCAST |
2337 WAKE_MCAST |
2338 WAKE_BCAST |
2339 WAKE_ARP))
2340 return -EOPNOTSUPP;
2341
2342 jme->reg_pmcs = 0;
2343
2344 if (wol->wolopts & WAKE_PHY)
2345 jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;
2346
2347 if (wol->wolopts & WAKE_MAGIC)
2348 jme->reg_pmcs |= PMCS_MFEN;
2349
2350 jwrite32(jme, JME_PMCS, jme->reg_pmcs);
2351
2352 return 0;
2353}
2354
2355static int
2356jme_get_settings(struct net_device *netdev,
2357 struct ethtool_cmd *ecmd)
2358{
2359 struct jme_adapter *jme = netdev_priv(netdev);
2360 int rc;
2361
2362 spin_lock_bh(&jme->phy_lock);
2363 rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
2364 spin_unlock_bh(&jme->phy_lock);
2365 return rc;
2366}
2367
2368static int
2369jme_set_settings(struct net_device *netdev,
2370 struct ethtool_cmd *ecmd)
2371{
2372 struct jme_adapter *jme = netdev_priv(netdev);
2373 int rc, fdc = 0;
2374
2375 if (ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE)
2376 return -EINVAL;
2377
2378 if (jme->mii_if.force_media &&
2379 ecmd->autoneg != AUTONEG_ENABLE &&
2380 (jme->mii_if.full_duplex != ecmd->duplex))
2381 fdc = 1;
2382
2383 spin_lock_bh(&jme->phy_lock);
2384 rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
2385 spin_unlock_bh(&jme->phy_lock);
2386
2387 if (!rc && fdc)
2388 jme_reset_link(jme);
2389
2390 if (!rc) {
2391 set_bit(JME_FLAG_SSET, &jme->flags);
2392 jme->old_ecmd = *ecmd;
2393 }
2394
2395 return rc;
2396}
2397
2398static u32
2399jme_get_link(struct net_device *netdev)
2400{
2401 struct jme_adapter *jme = netdev_priv(netdev);
2402 return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
2403}
2404
2405static u32
2406jme_get_msglevel(struct net_device *netdev)
2407{
2408 struct jme_adapter *jme = netdev_priv(netdev);
2409 return jme->msg_enable;
2410}
2411
2412static void
2413jme_set_msglevel(struct net_device *netdev, u32 value)
2414{
2415 struct jme_adapter *jme = netdev_priv(netdev);
2416 jme->msg_enable = value;
2417}
2418
2419static u32
2420jme_get_rx_csum(struct net_device *netdev)
2421{
2422 struct jme_adapter *jme = netdev_priv(netdev);
2423 return jme->reg_rxmcs & RXMCS_CHECKSUM;
2424}
2425
2426static int
2427jme_set_rx_csum(struct net_device *netdev, u32 on)
2428{
2429 struct jme_adapter *jme = netdev_priv(netdev);
2430
2431 spin_lock_bh(&jme->rxmcs_lock);
2432 if (on)
2433 jme->reg_rxmcs |= RXMCS_CHECKSUM;
2434 else
2435 jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
2436 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2437 spin_unlock_bh(&jme->rxmcs_lock);
2438
2439 return 0;
2440}
2441
2442static int
2443jme_set_tx_csum(struct net_device *netdev, u32 on)
2444{
2445 struct jme_adapter *jme = netdev_priv(netdev);
2446
2447 if (on) {
2448 set_bit(JME_FLAG_TXCSUM, &jme->flags);
2449 if (netdev->mtu <= 1900)
2450 netdev->features |= NETIF_F_HW_CSUM;
2451 } else {
2452 clear_bit(JME_FLAG_TXCSUM, &jme->flags);
2453 netdev->features &= ~NETIF_F_HW_CSUM;
2454 }
2455
2456 return 0;
2457}
2458
2459static int
2460jme_set_tso(struct net_device *netdev, u32 on)
2461{
2462 struct jme_adapter *jme = netdev_priv(netdev);
2463
2464 if (on) {
2465 set_bit(JME_FLAG_TSO, &jme->flags);
2466 if (netdev->mtu <= 1900)
2467 netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
2468 } else {
2469 clear_bit(JME_FLAG_TSO, &jme->flags);
2470 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
2471 }
2472
2473 return 0;
2474}
2475
2476static int
2477jme_nway_reset(struct net_device *netdev)
2478{
2479 struct jme_adapter *jme = netdev_priv(netdev);
2480 jme_restart_an(jme);
2481 return 0;
2482}
2483
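/*
 * EEPROM access through the SMB interface: poll the SMBCSR and
 * SMBINTF busy bits with a millisecond-step timeout, returning
 * 0xFF (read) or bailing out (write) when the bus stays busy.
 */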
2484static u8
2485jme_smb_read(struct jme_adapter *jme, unsigned int addr)
2486{
2487 u32 val;
2488 int to;
2489
2490 val = jread32(jme, JME_SMBCSR);
2491 to = JME_SMB_BUSY_TIMEOUT;
2492 while ((val & SMBCSR_BUSY) && --to) {
2493 msleep(1);
2494 val = jread32(jme, JME_SMBCSR);
2495 }
2496 if (!to) {
2497 msg_hw(jme, "SMB Bus Busy.\n");
2498 return 0xFF;
2499 }
2500
2501 jwrite32(jme, JME_SMBINTF,
2502 ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
2503 SMBINTF_HWRWN_READ |
2504 SMBINTF_HWCMD);
2505
2506 val = jread32(jme, JME_SMBINTF);
2507 to = JME_SMB_BUSY_TIMEOUT;
2508 while ((val & SMBINTF_HWCMD) && --to) {
2509 msleep(1);
2510 val = jread32(jme, JME_SMBINTF);
2511 }
2512 if (!to) {
2513 msg_hw(jme, "SMB Bus Busy.\n");
2514 return 0xFF;
2515 }
2516
2517 return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT;
2518}
2519
2520static void
2521jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
2522{
2523 u32 val;
2524 int to;
2525
2526 val = jread32(jme, JME_SMBCSR);
2527 to = JME_SMB_BUSY_TIMEOUT;
2528 while ((val & SMBCSR_BUSY) && --to) {
2529 msleep(1);
2530 val = jread32(jme, JME_SMBCSR);
2531 }
2532 if (!to) {
2533 msg_hw(jme, "SMB Bus Busy.\n");
2534 return;
2535 }
2536
2537 jwrite32(jme, JME_SMBINTF,
2538 ((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) |
2539 ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
2540 SMBINTF_HWRWN_WRITE |
2541 SMBINTF_HWCMD);
2542
2543 val = jread32(jme, JME_SMBINTF);
2544 to = JME_SMB_BUSY_TIMEOUT;
2545 while ((val & SMBINTF_HWCMD) && --to) {
2546 msleep(1);
2547 val = jread32(jme, JME_SMBINTF);
2548 }
2549 if (!to) {
2550 msg_hw(jme, "SMB Bus Busy.\n");
2551 return;
2552 }
2553
2554 mdelay(2);
2555}
2556
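/*
 * Report JME_SMB_LEN only when SMBCSR flags an EEPROM as present.
 */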
2557static int
2558jme_get_eeprom_len(struct net_device *netdev)
2559{
2560 struct jme_adapter *jme = netdev_priv(netdev);
2561 u32 val;
2562 val = jread32(jme, JME_SMBCSR);
2563 return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0;
2564}
2565
2566static int
2567jme_get_eeprom(struct net_device *netdev,
2568 struct ethtool_eeprom *eeprom, u8 *data)
2569{
2570 struct jme_adapter *jme = netdev_priv(netdev);
2571 int i, offset = eeprom->offset, len = eeprom->len;
2572
2573 /*
2574 * ethtool will check the boundary for us
2575 */
2576 eeprom->magic = JME_EEPROM_MAGIC;
2577 for (i = 0 ; i < len ; ++i)
2578 data[i] = jme_smb_read(jme, i + offset);
2579
2580 return 0;
2581}
2582
2583static int
2584jme_set_eeprom(struct net_device *netdev,
2585 struct ethtool_eeprom *eeprom, u8 *data)
2586{
2587 struct jme_adapter *jme = netdev_priv(netdev);
2588 int i, offset = eeprom->offset, len = eeprom->len;
2589
2590 if (eeprom->magic != JME_EEPROM_MAGIC)
2591 return -EINVAL;
2592
2593 /*
2594 * ethtool will check the boundary for us
2595 */
2596 for (i = 0 ; i < len ; ++i)
2597 jme_smb_write(jme, i + offset, data[i]);
2598
2599 return 0;
2600}
2601
2602static const struct ethtool_ops jme_ethtool_ops = {
2603 .get_drvinfo = jme_get_drvinfo,
2604 .get_regs_len = jme_get_regs_len,
2605 .get_regs = jme_get_regs,
2606 .get_coalesce = jme_get_coalesce,
2607 .set_coalesce = jme_set_coalesce,
2608 .get_pauseparam = jme_get_pauseparam,
2609 .set_pauseparam = jme_set_pauseparam,
2610 .get_wol = jme_get_wol,
2611 .set_wol = jme_set_wol,
2612 .get_settings = jme_get_settings,
2613 .set_settings = jme_set_settings,
2614 .get_link = jme_get_link,
2615 .get_msglevel = jme_get_msglevel,
2616 .set_msglevel = jme_set_msglevel,
2617 .get_rx_csum = jme_get_rx_csum,
2618 .set_rx_csum = jme_set_rx_csum,
2619 .set_tx_csum = jme_set_tx_csum,
2620 .set_tso = jme_set_tso,
2621 .set_sg = ethtool_op_set_sg,
2622 .nway_reset = jme_nway_reset,
2623 .get_eeprom_len = jme_get_eeprom_len,
2624 .get_eeprom = jme_get_eeprom,
2625 .set_eeprom = jme_set_eeprom,
2626};
2627
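/*
 * Select the widest usable DMA mask: 64-bit, then 40-bit (JMC250
 * only), then 32-bit.  Returns 1 when DAC addressing can be used,
 * 0 for plain 32-bit DMA, and -1 if no mask could be set.
 */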
2628static int
2629jme_pci_dma64(struct pci_dev *pdev)
2630{
2631 if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
2632 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
2633 if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2634 return 1;
2635
2636 if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
2637 !pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
2638 if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)))
2639 return 1;
2640
2641 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
2642 if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
2643 return 0;
2644
2645 return -1;
2646}
2647
2648static inline void
2649jme_phy_init(struct jme_adapter *jme)
2650{
2651 u16 reg26;
2652
2653 reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26);
2654 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
2655}
2656
2657static inline void
2658jme_check_hw_ver(struct jme_adapter *jme)
2659{
2660 u32 chipmode;
2661
2662 chipmode = jread32(jme, JME_CHIPMODE);
2663
2664 jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
2665 jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
2666}
2667
2668static const struct net_device_ops jme_netdev_ops = {
2669 .ndo_open = jme_open,
2670 .ndo_stop = jme_close,
2671 .ndo_validate_addr = eth_validate_addr,
2672 .ndo_start_xmit = jme_start_xmit,
2673 .ndo_set_mac_address = jme_set_macaddr,
2674 .ndo_set_multicast_list = jme_set_multi,
2675 .ndo_change_mtu = jme_change_mtu,
2676 .ndo_tx_timeout = jme_tx_timeout,
2677 .ndo_vlan_rx_register = jme_vlan_rx_register,
2678};
2679
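/*
 * Probe one adapter: enable PCI, map the registers, allocate and
 * initialize the netdev, identify the chip and PHY, then register
 * with the network stack.
 */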
2680static int __devinit
2681jme_init_one(struct pci_dev *pdev,
2682 const struct pci_device_id *ent)
2683{
2684 int rc = 0, using_dac, i;
2685 struct net_device *netdev;
2686 struct jme_adapter *jme;
2687 u16 bmcr, bmsr;
2688 u32 apmc;
2689
2690 /*
2691 * set up PCI device basics
2692 */
2693 rc = pci_enable_device(pdev);
2694 if (rc) {
2695 jeprintk(pdev, "Cannot enable PCI device.\n");
2696 goto err_out;
2697 }
2698
2699 using_dac = jme_pci_dma64(pdev);
2700 if (using_dac < 0) {
2701 jeprintk(pdev, "Cannot set PCI DMA Mask.\n");
2702 rc = -EIO;
2703 goto err_out_disable_pdev;
2704 }
2705
2706 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2707 jeprintk(pdev, "No PCI resource region found.\n");
2708 rc = -ENOMEM;
2709 goto err_out_disable_pdev;
2710 }
2711
2712 rc = pci_request_regions(pdev, DRV_NAME);
2713 if (rc) {
2714 jeprintk(pdev, "Cannot obtain PCI resource region.\n");
2715 goto err_out_disable_pdev;
2716 }
2717
2718 pci_set_master(pdev);
2719
2720 /*
2721 * alloc and init net device
2722 */
2723 netdev = alloc_etherdev(sizeof(*jme));
2724 if (!netdev) {
2725 jeprintk(pdev, "Cannot allocate netdev structure.\n");
2726 rc = -ENOMEM;
2727 goto err_out_release_regions;
2728 }
2729 netdev->netdev_ops = &jme_netdev_ops;
2730 netdev->ethtool_ops = &jme_ethtool_ops;
2731 netdev->watchdog_timeo = TX_TIMEOUT;
2732 netdev->features = NETIF_F_HW_CSUM |
2733 NETIF_F_SG |
2734 NETIF_F_TSO |
2735 NETIF_F_TSO6 |
2736 NETIF_F_HW_VLAN_TX |
2737 NETIF_F_HW_VLAN_RX;
2738 if (using_dac)
2739 netdev->features |= NETIF_F_HIGHDMA;
2740
2741 SET_NETDEV_DEV(netdev, &pdev->dev);
2742 pci_set_drvdata(pdev, netdev);
2743
2744 /*
2745 * init adapter info
2746 */
2747 jme = netdev_priv(netdev);
2748 jme->pdev = pdev;
2749 jme->dev = netdev;
2750 jme->jme_rx = netif_rx;
2751 jme->jme_vlan_rx = vlan_hwaccel_rx;
2752 jme->old_mtu = netdev->mtu = 1500;
2753 jme->phylink = 0;
2754 jme->tx_ring_size = 1 << 10;
2755 jme->tx_ring_mask = jme->tx_ring_size - 1;
2756 jme->tx_wake_threshold = 1 << 9;
2757 jme->rx_ring_size = 1 << 9;
2758 jme->rx_ring_mask = jme->rx_ring_size - 1;
2759 jme->msg_enable = JME_DEF_MSG_ENABLE;
2760 jme->regs = ioremap(pci_resource_start(pdev, 0),
2761 pci_resource_len(pdev, 0));
2762 if (!(jme->regs)) {
2763 jeprintk(pdev, "Failed to map PCI resource region.\n");
2764 rc = -ENOMEM;
2765 goto err_out_free_netdev;
2766 }
2767 jme->shadow_regs = pci_alloc_consistent(pdev,
2768 sizeof(u32) * SHADOW_REG_NR,
2769 &(jme->shadow_dma));
2770 if (!(jme->shadow_regs)) {
2771 jeprintk(pdev, "Failed to allocate shadow register mapping.\n");
2772 rc = -ENOMEM;
2773 goto err_out_unmap;
2774 }
2775
2776 if (no_pseudohp) {
2777 apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN;
2778 jwrite32(jme, JME_APMC, apmc);
2779 } else if (force_pseudohp) {
2780 apmc = jread32(jme, JME_APMC) | JME_APMC_PSEUDO_HP_EN;
2781 jwrite32(jme, JME_APMC, apmc);
2782 }
2783
2784 NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2)
2785
2786 spin_lock_init(&jme->phy_lock);
2787 spin_lock_init(&jme->macaddr_lock);
2788 spin_lock_init(&jme->rxmcs_lock);
2789
2790 atomic_set(&jme->link_changing, 1);
2791 atomic_set(&jme->rx_cleaning, 1);
2792 atomic_set(&jme->tx_cleaning, 1);
2793 atomic_set(&jme->rx_empty, 1);
2794
2795 tasklet_init(&jme->pcc_task,
2796 &jme_pcc_tasklet,
2797 (unsigned long) jme);
2798 tasklet_init(&jme->linkch_task,
2799 &jme_link_change_tasklet,
2800 (unsigned long) jme);
2801 tasklet_init(&jme->txclean_task,
2802 &jme_tx_clean_tasklet,
2803 (unsigned long) jme);
2804 tasklet_init(&jme->rxclean_task,
2805 &jme_rx_clean_tasklet,
2806 (unsigned long) jme);
2807 tasklet_init(&jme->rxempty_task,
2808 &jme_rx_empty_tasklet,
2809 (unsigned long) jme);
2810 tasklet_disable_nosync(&jme->linkch_task);
2811 tasklet_disable_nosync(&jme->txclean_task);
2812 tasklet_disable_nosync(&jme->rxclean_task);
2813 tasklet_disable_nosync(&jme->rxempty_task);
2814 jme->dpi.cur = PCC_P1;
2815
2816 jme->reg_ghc = 0;
2817 jme->reg_rxcs = RXCS_DEFAULT;
2818 jme->reg_rxmcs = RXMCS_DEFAULT;
2819 jme->reg_txpfc = 0;
2820 jme->reg_pmcs = PMCS_MFEN;
2821 set_bit(JME_FLAG_TXCSUM, &jme->flags);
2822 set_bit(JME_FLAG_TSO, &jme->flags);
2823
2824 /*
2825 * Get Max Read Req Size from PCI Config Space
2826 */
2827 pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs);
2828 jme->mrrs &= PCI_DCSR_MRRS_MASK;
2829 switch (jme->mrrs) {
2830 case MRRS_128B:
2831 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
2832 break;
2833 case MRRS_256B:
2834 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
2835 break;
2836 default:
2837 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
2838 break;
2839 }
2840
2841 /*
2842 * Must be checked before reset_mac_processor
2843 */
2844 jme_check_hw_ver(jme);
2845 jme->mii_if.dev = netdev;
2846 if (jme->fpgaver) {
2847 jme->mii_if.phy_id = 0;
2848 for (i = 1 ; i < 32 ; ++i) {
2849 bmcr = jme_mdio_read(netdev, i, MII_BMCR);
2850 bmsr = jme_mdio_read(netdev, i, MII_BMSR);
2851 if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
2852 jme->mii_if.phy_id = i;
2853 break;
2854 }
2855 }
2856
2857 if (!jme->mii_if.phy_id) {
2858 rc = -EIO;
2859 jeprintk(pdev, "Cannot find phy_id.\n");
2860 goto err_out_free_shadow;
2861 }
2862
2863 jme->reg_ghc |= GHC_LINK_POLL;
2864 } else {
2865 jme->mii_if.phy_id = 1;
2866 }
2867 if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
2868 jme->mii_if.supports_gmii = true;
2869 else
2870 jme->mii_if.supports_gmii = false;
2871 jme->mii_if.mdio_read = jme_mdio_read;
2872 jme->mii_if.mdio_write = jme_mdio_write;
2873
2874 jme_clear_pm(jme);
2875 jme_set_phyfifoa(jme);
2876 pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->rev);
2877 if (!jme->fpgaver)
2878 jme_phy_init(jme);
2879 jme_phy_off(jme);
2880
2881 /*
2882 * Reset MAC processor and reload EEPROM for MAC Address
2883 */
2884 jme_reset_mac_processor(jme);
2885 rc = jme_reload_eeprom(jme);
2886 if (rc) {
2887 jeprintk(pdev,
2888 "Reload eeprom for reading MAC Address error.\n");
2889 goto err_out_free_shadow;
2890 }
2891 jme_load_macaddr(netdev);
2892
2893 /*
2894 * Tell stack that we are not ready to work until open()
2895 */
2896 netif_carrier_off(netdev);
2897 netif_stop_queue(netdev);
2898
2899 /*
2900 * Register netdev
2901 */
2902 rc = register_netdev(netdev);
2903 if (rc) {
2904 jeprintk(pdev, "Cannot register net device.\n");
2905 goto err_out_free_shadow;
2906 }
2907
2908 msg_probe(jme, "%s%s ver:%x rev:%x macaddr:%pM\n",
2909 (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
2910 "JMC250 Gigabit Ethernet" :
2911 (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
2912 "JMC260 Fast Ethernet" : "Unknown",
2913 (jme->fpgaver != 0) ? " (FPGA)" : "",
2914 (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
2915 jme->rev, netdev->dev_addr);
2916
2917 return 0;
2918
2919err_out_free_shadow:
2920 pci_free_consistent(pdev,
2921 sizeof(u32) * SHADOW_REG_NR,
2922 jme->shadow_regs,
2923 jme->shadow_dma);
2924err_out_unmap:
2925 iounmap(jme->regs);
2926err_out_free_netdev:
2927 pci_set_drvdata(pdev, NULL);
2928 free_netdev(netdev);
2929err_out_release_regions:
2930 pci_release_regions(pdev);
2931err_out_disable_pdev:
2932 pci_disable_device(pdev);
2933err_out:
2934 return rc;
2935}
2936
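/*
 * Tear down everything jme_init_one() set up, in reverse order.
 */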
2937static void __devexit
2938jme_remove_one(struct pci_dev *pdev)
2939{
2940 struct net_device *netdev = pci_get_drvdata(pdev);
2941 struct jme_adapter *jme = netdev_priv(netdev);
2942
2943 unregister_netdev(netdev);
2944 pci_free_consistent(pdev,
2945 sizeof(u32) * SHADOW_REG_NR,
2946 jme->shadow_regs,
2947 jme->shadow_dma);
2948 iounmap(jme->regs);
2949 pci_set_drvdata(pdev, NULL);
2950 free_netdev(netdev);
2951 pci_release_regions(pdev);
2952 pci_disable_device(pdev);
2953
2954}
2955
2956#ifdef CONFIG_PM
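/*
 * Suspend: quiesce the queue and tasklets, tear down the rings,
 * then either arm the configured wake events or power the PHY off
 * before entering D3cold.
 */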
2957static int
2958jme_suspend(struct pci_dev *pdev, pm_message_t state)
2959{
2960 struct net_device *netdev = pci_get_drvdata(pdev);
2961 struct jme_adapter *jme = netdev_priv(netdev);
2962
2963 atomic_dec(&jme->link_changing);
2964
2965 netif_device_detach(netdev);
2966 netif_stop_queue(netdev);
2967 jme_stop_irq(jme);
2968
2969 tasklet_disable(&jme->txclean_task);
2970 tasklet_disable(&jme->rxclean_task);
2971 tasklet_disable(&jme->rxempty_task);
2972
2973 jme_disable_shadow(jme);
2974
2975 if (netif_carrier_ok(netdev)) {
2976 if (test_bit(JME_FLAG_POLL, &jme->flags))
2977 jme_polling_mode(jme);
2978
2979 jme_stop_pcc_timer(jme);
2980 jme_reset_ghc_speed(jme);
2981 jme_disable_rx_engine(jme);
2982 jme_disable_tx_engine(jme);
2983 jme_reset_mac_processor(jme);
2984 jme_free_rx_resources(jme);
2985 jme_free_tx_resources(jme);
2986 netif_carrier_off(netdev);
2987 jme->phylink = 0;
2988 }
2989
2990 tasklet_enable(&jme->txclean_task);
2991 tasklet_hi_enable(&jme->rxclean_task);
2992 tasklet_hi_enable(&jme->rxempty_task);
2993
2994 pci_save_state(pdev);
2995 if (jme->reg_pmcs) {
2996 jme_set_100m_half(jme);
2997
2998 if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
2999 jme_wait_link(jme);
3000
3001 jwrite32(jme, JME_PMCS, jme->reg_pmcs);
3002
3003 pci_enable_wake(pdev, PCI_D3cold, true);
3004 } else {
3005 jme_phy_off(jme);
3006 }
3007 pci_set_power_state(pdev, PCI_D3cold);
3008
3009 return 0;
3010}
3011
3012static int
3013jme_resume(struct pci_dev *pdev)
3014{
3015 struct net_device *netdev = pci_get_drvdata(pdev);
3016 struct jme_adapter *jme = netdev_priv(netdev);
3017
3018 jme_clear_pm(jme);
3019 pci_restore_state(pdev);
3020
3021 if (test_bit(JME_FLAG_SSET, &jme->flags))
3022 jme_set_settings(netdev, &jme->old_ecmd);
3023 else
3024 jme_reset_phy_processor(jme);
3025
3026 jme_enable_shadow(jme);
3027 jme_start_irq(jme);
3028 netif_device_attach(netdev);
3029
3030 atomic_inc(&jme->link_changing);
3031
3032 jme_reset_link(jme);
3033
3034 return 0;
3035}
3036#endif
3037
3038static struct pci_device_id jme_pci_tbl[] = {
3039 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
3040 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
3041 { }
3042};
3043
3044static struct pci_driver jme_driver = {
3045 .name = DRV_NAME,
3046 .id_table = jme_pci_tbl,
3047 .probe = jme_init_one,
3048 .remove = __devexit_p(jme_remove_one),
3049#ifdef CONFIG_PM
3050 .suspend = jme_suspend,
3051 .resume = jme_resume,
3052#endif /* CONFIG_PM */
3053};
3054
3055static int __init
3056jme_init_module(void)
3057{
3058 printk(KERN_INFO PFX "JMicron JMC2XX ethernet "
3059 "driver version %s\n", DRV_VERSION);
3060 return pci_register_driver(&jme_driver);
3061}
3062
3063static void __exit
3064jme_cleanup_module(void)
3065{
3066 pci_unregister_driver(&jme_driver);
3067}
3068
3069module_init(jme_init_module);
3070module_exit(jme_cleanup_module);
3071
3072MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
3073MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
3074MODULE_LICENSE("GPL");
3075MODULE_VERSION(DRV_VERSION);
3076MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
3077