/*
 * Micrel KS8695 (Centaur) Ethernet.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Copyright 2008 Simtec Electronics
 *	Daniel Silverstone <dsilvers@simtec.co.uk>
 *	Vincent Sanders <vince@simtec.co.uk>
 */

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/io.h>

#include <asm/irq.h>

#include <mach/regs-switch.h>
#include <mach/regs-misc.h>
#include <asm/mach/irq.h>
#include <mach/regs-irq.h>

#include "ks8695net.h"

#define MODULENAME	"ks8695_ether"
#define MODULEVERSION	"1.02"

/*
 * Transmit and device reset timeout, default 5 seconds.
 */
static int watchdog = 5000;

/* Hardware structures */

/**
 * struct rx_ring_desc - Receive descriptor ring element
 * @status: The status of the descriptor element (E.g. who owns it)
 * @length: The number of bytes in the block pointed to by data_ptr
 * @data_ptr: The physical address of the data block to receive into
 * @next_desc: The physical address of the next descriptor element.
 */
struct rx_ring_desc {
	__le32	status;
	__le32	length;
	__le32	data_ptr;
	__le32	next_desc;
};

/**
 * struct tx_ring_desc - Transmit descriptor ring element
 * @owner: Who owns the descriptor
 * @status: Control flags and the number of bytes in the block pointed
 *	to by data_ptr
 * @data_ptr: The physical address of the data block to transmit from
 * @next_desc: The physical address of the next descriptor element.
 */
struct tx_ring_desc {
	__le32	owner;
	__le32	status;
	__le32	data_ptr;
	__le32	next_desc;
};
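
/*
 * Both rings use the same ownership handshake: the driver fills in a
 * descriptor and then sets the hardware-ownership bit (RDES_OWN in the
 * RX descriptor's status word, TDES_OWN in the TX descriptor's owner
 * word) to hand it to the hardware; once the hardware clears that bit
 * the driver may reclaim the buffer.  See ks8695_refill_rxbuffers(),
 * ks8695_start_xmit() and ks8695_tx_irq() below.
 */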

/**
 * struct ks8695_skbuff - sk_buff wrapper for rx/tx rings.
 * @skb: The buffer in the ring
 * @dma_ptr: The mapped DMA pointer of the buffer
 * @length: The number of bytes mapped to dma_ptr
 */
struct ks8695_skbuff {
	struct sk_buff	*skb;
	dma_addr_t	dma_ptr;
	u32		length;
};

/* Private device structure */

#define MAX_TX_DESC 8
#define MAX_TX_DESC_MASK 0x7
#define MAX_RX_DESC 16
#define MAX_RX_DESC_MASK 0xf
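
/*
 * The ring sizes are powers of two, so ANDing an index with the
 * matching mask (8 -> 0x7, 16 -> 0xf) wraps it around the ring
 * without a division.
 */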

/* The NAPI poll weight should exceed the number of RX DMA buffers. */
#define NAPI_WEIGHT	64

#define MAX_RXBUF_SIZE	0x700
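/* 0x700 is 1792 bytes per RX buffer, comfortably larger than a
 * maximal 1518-byte Ethernet frame. */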

#define TX_RING_DMA_SIZE (sizeof(struct tx_ring_desc) * MAX_TX_DESC)
#define RX_RING_DMA_SIZE (sizeof(struct rx_ring_desc) * MAX_RX_DESC)
#define RING_DMA_SIZE (TX_RING_DMA_SIZE + RX_RING_DMA_SIZE)
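/*
 * Both rings are carved out of one dma_alloc_coherent() block of
 * RING_DMA_SIZE bytes: the TX ring sits at the start and the RX ring
 * follows at offset TX_RING_DMA_SIZE (see ks8695_probe()).
 */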

/**
 * enum ks8695_dtype - Device type
 * @KS8695_DTYPE_WAN: This device is a WAN interface
 * @KS8695_DTYPE_LAN: This device is a LAN interface
 * @KS8695_DTYPE_HPNA: This device is an HPNA interface
 */
enum ks8695_dtype {
	KS8695_DTYPE_WAN,
	KS8695_DTYPE_LAN,
	KS8695_DTYPE_HPNA,
};

/**
 * struct ks8695_priv - Private data for the KS8695 Ethernet
 * @in_suspend: Flag to indicate if we're suspending/resuming
 * @ndev: The net_device for this interface
 * @dev: The platform device object for this interface
 * @dtype: The type of this device
 * @io_regs: The ioremapped registers for this interface
 * @napi: The NAPI context used for RX polling
 * @rx_irq_name: The textual name of the RX IRQ from the platform data
 * @tx_irq_name: The textual name of the TX IRQ from the platform data
 * @link_irq_name: The textual name of the link IRQ from the
 *	platform data if available
 * @rx_irq: The IRQ number for the RX IRQ
 * @tx_irq: The IRQ number for the TX IRQ
 * @link_irq: The IRQ number for the link IRQ if available
 * @regs_req: The resource request for the registers region
 * @phyiface_req: The resource request for the phy/switch region
 *	if available
 * @phyiface_regs: The ioremapped registers for the phy/switch if available
 * @ring_base: The base pointer of the dma coherent memory for the rings
 * @ring_base_dma: The DMA mapped equivalent of ring_base
 * @tx_ring: The pointer in ring_base of the TX ring
 * @tx_ring_used: The number of slots in the TX ring which are occupied
 * @tx_ring_next_slot: The next slot to fill in the TX ring
 * @tx_ring_dma: The DMA mapped equivalent of tx_ring
 * @tx_buffers: The sk_buff mappings for the TX ring
 * @txq_lock: A lock to protect the tx_buffers, tx_ring_used, etc. variables
 * @rx_ring: The pointer in ring_base of the RX ring
 * @rx_ring_dma: The DMA mapped equivalent of rx_ring
 * @rx_buffers: The sk_buff mappings for the RX ring
 * @next_rx_desc_read: The next RX descriptor to read from on IRQ
 * @rx_lock: A lock protecting the RX IRQ handler and NAPI scheduling
 * @msg_enable: The flags for which messages to emit
 */
struct ks8695_priv {
	int in_suspend;
	struct net_device *ndev;
	struct device *dev;
	enum ks8695_dtype dtype;
	void __iomem *io_regs;

	struct napi_struct napi;

	const char *rx_irq_name, *tx_irq_name, *link_irq_name;
	int rx_irq, tx_irq, link_irq;

	struct resource *regs_req, *phyiface_req;
	void __iomem *phyiface_regs;

	void *ring_base;
	dma_addr_t ring_base_dma;

	struct tx_ring_desc *tx_ring;
	int tx_ring_used;
	int tx_ring_next_slot;
	dma_addr_t tx_ring_dma;
	struct ks8695_skbuff tx_buffers[MAX_TX_DESC];
	spinlock_t txq_lock;

	struct rx_ring_desc *rx_ring;
	dma_addr_t rx_ring_dma;
	struct ks8695_skbuff rx_buffers[MAX_RX_DESC];
	int next_rx_desc_read;
	spinlock_t rx_lock;

	int msg_enable;
};

/* Register access */

/**
 * ks8695_readreg - Read from a KS8695 ethernet register
 * @ksp: The device to read from
 * @reg: The register to read
 */
static inline u32
ks8695_readreg(struct ks8695_priv *ksp, int reg)
{
	return readl(ksp->io_regs + reg);
}

/**
 * ks8695_writereg - Write to a KS8695 ethernet register
 * @ksp: The device to write to
 * @reg: The register to write
 * @value: The value to write to the register
 */
static inline void
ks8695_writereg(struct ks8695_priv *ksp, int reg, u32 value)
{
	writel(value, ksp->io_regs + reg);
}

/* Utility functions */

/**
 * ks8695_port_type - Retrieve port-type as user-friendly string
 * @ksp: The device to return the type for
 *
 * Returns a string indicating which of the WAN, LAN or HPNA
 * ports this device is likely to represent.
 */
static const char *
ks8695_port_type(struct ks8695_priv *ksp)
{
	switch (ksp->dtype) {
	case KS8695_DTYPE_LAN:
		return "LAN";
	case KS8695_DTYPE_WAN:
		return "WAN";
	case KS8695_DTYPE_HPNA:
		return "HPNA";
	}

	return "UNKNOWN";
}

/**
 * ks8695_update_mac - Update the MAC registers in the device
 * @ksp: The device to update
 *
 * Updates the MAC registers in the KS8695 device from the address in the
 * net_device structure associated with this interface.
 */
static void
ks8695_update_mac(struct ks8695_priv *ksp)
{
	/* Update the HW with the MAC from the net_device */
	struct net_device *ndev = ksp->ndev;
	u32 machigh, maclow;

	maclow = ((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
		  (ndev->dev_addr[4] <<  8) | (ndev->dev_addr[5] <<  0));
	machigh = ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1] << 0));

	ks8695_writereg(ksp, KS8695_MAL, maclow);
	ks8695_writereg(ksp, KS8695_MAH, machigh);
}
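
/*
 * For example, a MAC address of 00:11:22:33:44:55 is written out as
 * KS8695_MAH = 0x00000011 and KS8695_MAL = 0x22334455.
 */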

/**
 * ks8695_refill_rxbuffers - Re-fill the RX buffer ring
 * @ksp: The device to refill
 *
 * Iterates the RX ring of the device looking for empty slots.
 * For each empty slot, we allocate and map a new SKB and give it
 * to the hardware.
 * This can be called from interrupt context safely.
 */
static void
ks8695_refill_rxbuffers(struct ks8695_priv *ksp)
{
	/* Run around the RX ring, filling in any missing sk_buff's */
	int buff_n;

	for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
		if (!ksp->rx_buffers[buff_n].skb) {
			struct sk_buff *skb = dev_alloc_skb(MAX_RXBUF_SIZE);
			dma_addr_t mapping;

			ksp->rx_buffers[buff_n].skb = skb;
			if (skb == NULL) {
				/* Failed to allocate one, perhaps
				 * we'll try again later.
				 */
				break;
			}

			mapping = dma_map_single(ksp->dev, skb->data,
						 MAX_RXBUF_SIZE,
						 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(ksp->dev, mapping))) {
				/* Failed to DMA map this SKB, try later */
				dev_kfree_skb_irq(skb);
				ksp->rx_buffers[buff_n].skb = NULL;
				break;
			}
			ksp->rx_buffers[buff_n].dma_ptr = mapping;
			skb->dev = ksp->ndev;
			ksp->rx_buffers[buff_n].length = MAX_RXBUF_SIZE;

			/* Record this into the DMA ring */
			ksp->rx_ring[buff_n].data_ptr = cpu_to_le32(mapping);
			ksp->rx_ring[buff_n].length =
				cpu_to_le32(MAX_RXBUF_SIZE);

			wmb();

			/* And give ownership over to the hardware */
			ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
		}
	}
}

/* Maximum number of multicast addresses which the KS8695 HW supports */
#define KS8695_NR_ADDRESSES	16

/**
 * ks8695_init_partial_multicast - Init the mcast addr registers
 * @ksp: The device to initialise
 * @addr: The multicast address list to use
 * @nr_addr: The number of addresses in the list
 *
 * This routine is a helper for ks8695_set_multicast - it writes
 * the additional-address registers in the KS8695 ethernet device
 * and cleans up any others left behind.
 */
static void
ks8695_init_partial_multicast(struct ks8695_priv *ksp,
			      struct dev_mc_list *addr,
			      int nr_addr)
{
	u32 low, high;
	int i;

	for (i = 0; i < nr_addr; i++, addr = addr->next) {
		/* Ran out of addresses? */
		if (!addr)
			break;
		/* Ran out of space in chip? */
		BUG_ON(i == KS8695_NR_ADDRESSES);

		low = (addr->dmi_addr[2] << 24) | (addr->dmi_addr[3] << 16) |
		      (addr->dmi_addr[4] << 8) | (addr->dmi_addr[5]);
		high = (addr->dmi_addr[0] << 8) | (addr->dmi_addr[1]);

		ks8695_writereg(ksp, KS8695_AAL_(i), low);
		ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high);
	}

	/* Clear the remaining Additional Station Addresses */
	for (; i < KS8695_NR_ADDRESSES; i++) {
		ks8695_writereg(ksp, KS8695_AAL_(i), 0);
		ks8695_writereg(ksp, KS8695_AAH_(i), 0);
	}
}

/* Interrupt handling */

/**
 * ks8695_tx_irq - Transmit IRQ handler
 * @irq: The IRQ which went off (ignored)
 * @dev_id: The net_device for the interrupt
 *
 * Process the TX ring, clearing out any transmitted slots.
 * Allows the net_device to pass us new packets once slots are
 * freed.
 */
static irqreturn_t
ks8695_tx_irq(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ks8695_priv *ksp = netdev_priv(ndev);
	int buff_n;

	for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
		if (ksp->tx_buffers[buff_n].skb &&
		    !(ksp->tx_ring[buff_n].owner & cpu_to_le32(TDES_OWN))) {
			rmb();
			/* An SKB which is not owned by HW is present */
			/* Update the stats for the net_device */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += ksp->tx_buffers[buff_n].length;

			/* Free the packet from the ring */
			ksp->tx_ring[buff_n].data_ptr = 0;

			/* Free the sk_buff */
			dma_unmap_single(ksp->dev,
					 ksp->tx_buffers[buff_n].dma_ptr,
					 ksp->tx_buffers[buff_n].length,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
			ksp->tx_buffers[buff_n].skb = NULL;
			ksp->tx_ring_used--;
		}
	}

	netif_wake_queue(ndev);

	return IRQ_HANDLED;
}

/**
 * ks8695_get_rx_enable_bit - Get rx interrupt enable/status bit
 * @ksp: Private data for the KS8695 Ethernet
 *
 * From the KS8695 documentation:
 * Interrupt Enable Register (offset 0xE204)
 *	Bit29: WAN MAC Receive Interrupt Enable
 *	Bit16: LAN MAC Receive Interrupt Enable
 * Interrupt Status Register (Offset 0xF208)
 *	Bit29: WAN MAC Receive Status
 *	Bit16: LAN MAC Receive Status
 * So the RX interrupt enable/status bit number is the same as the
 * RX IRQ number.
 */
static inline u32 ks8695_get_rx_enable_bit(struct ks8695_priv *ksp)
{
	return ksp->rx_irq;
}
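
/*
 * Per the table above, the WAN port's RX IRQ is 29 and the LAN port's
 * is 16, so (1 << ksp->rx_irq) selects exactly that port's receive
 * enable/status bit.
 */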

/**
 * ks8695_rx_irq - Receive IRQ handler
 * @irq: The IRQ which went off (ignored)
 * @dev_id: The net_device for the interrupt
 *
 * Inform NAPI that packet reception needs to be scheduled
 */
static irqreturn_t
ks8695_rx_irq(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ks8695_priv *ksp = netdev_priv(ndev);

	spin_lock(&ksp->rx_lock);

	if (napi_schedule_prep(&ksp->napi)) {
		unsigned long status = readl(KS8695_IRQ_VA + KS8695_INTEN);
		unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
		/* disable the RX interrupt */
		status &= ~mask_bit;
		writel(status, KS8695_IRQ_VA + KS8695_INTEN);
		__napi_schedule(&ksp->napi);
	}

	spin_unlock(&ksp->rx_lock);
	return IRQ_HANDLED;
}

/**
 * ks8695_rx - Receive packets called by NAPI poll method
 * @ksp: Private data for the KS8695 Ethernet
 * @budget: The maximum number of packets to receive
 */
static int ks8695_rx(struct ks8695_priv *ksp, int budget)
{
	struct net_device *ndev = ksp->ndev;
	struct sk_buff *skb;
	int buff_n;
	u32 flags;
	int pktlen;
	int last_rx_processed = -1;
	int received = 0;

	buff_n = ksp->next_rx_desc_read;
	while (received < budget
			&& ksp->rx_buffers[buff_n].skb
			&& (!(ksp->rx_ring[buff_n].status &
					cpu_to_le32(RDES_OWN)))) {
		rmb();
		flags = le32_to_cpu(ksp->rx_ring[buff_n].status);
		/* Found an SKB which we own, this means we
		 * received a packet
		 */
		if ((flags & (RDES_FS | RDES_LS)) !=
		    (RDES_FS | RDES_LS)) {
			/* This packet is not both the first and
			 * the last segment.  Therefore it is a
			 * "spanning" packet and we can't handle it.
			 */
			goto rx_failure;
		}

		if (flags & (RDES_ES | RDES_RE)) {
			/* It's an error packet */
			ndev->stats.rx_errors++;
			if (flags & RDES_TL)
				ndev->stats.rx_length_errors++;
			if (flags & RDES_RF)
				ndev->stats.rx_length_errors++;
			if (flags & RDES_CE)
				ndev->stats.rx_crc_errors++;
			if (flags & RDES_RE)
				ndev->stats.rx_missed_errors++;

			goto rx_failure;
		}

		pktlen = flags & RDES_FLEN;
		pktlen -= 4; /* Drop the CRC */

		/* Retrieve the sk_buff */
		skb = ksp->rx_buffers[buff_n].skb;

		/* Clear it from the ring */
		ksp->rx_buffers[buff_n].skb = NULL;
		ksp->rx_ring[buff_n].data_ptr = 0;

		/* Unmap the SKB */
		dma_unmap_single(ksp->dev,
				 ksp->rx_buffers[buff_n].dma_ptr,
				 ksp->rx_buffers[buff_n].length,
				 DMA_FROM_DEVICE);

		/* Relinquish the SKB to the network layer */
		skb_put(skb, pktlen);
		skb->protocol = eth_type_trans(skb, ndev);
		netif_receive_skb(skb);

		/* Record stats */
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += pktlen;
		goto rx_finished;

rx_failure:
		/* This ring entry is an error, but we can
		 * re-use the skb
		 */
		/* Give the ring entry back to the hardware */
		ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
rx_finished:
		received++;
		/* And note this as processed so we can start
		 * from here next time
		 */
		last_rx_processed = buff_n;
		buff_n = (buff_n + 1) & MAX_RX_DESC_MASK;
		/* And note which RX descriptor we last did */
		if (likely(last_rx_processed != -1))
			ksp->next_rx_desc_read =
				(last_rx_processed + 1) &
				MAX_RX_DESC_MASK;

		/* And refill the buffers */
		ks8695_refill_rxbuffers(ksp);

		/* Kick the RX DMA engine, in case it became
		 * suspended */
		ks8695_writereg(ksp, KS8695_DRSC, 0);
	}
	return received;
}

/**
 * ks8695_poll - Receive packets via the NAPI poll method
 * @napi: The NAPI context for this device
 * @budget: The maximum number of packets the network core allows us
 *	to process on this call
 *
 * Invoked by the network core when it requests new packets
 * from the driver.
 */
static int ks8695_poll(struct napi_struct *napi, int budget)
{
	struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi);
	unsigned long work_done;

	unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN);
	unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);

	work_done = ks8695_rx(ksp, budget);

	if (work_done < budget) {
		unsigned long flags;
		spin_lock_irqsave(&ksp->rx_lock, flags);
		/* re-enable the RX interrupt */
		writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN);
		__napi_complete(napi);
		spin_unlock_irqrestore(&ksp->rx_lock, flags);
	}
	return work_done;
}

/**
 * ks8695_link_irq - Link change IRQ handler
 * @irq: The IRQ which went off (ignored)
 * @dev_id: The net_device for the interrupt
 *
 * The WAN interface can generate an IRQ when the link changes,
 * report this to the net layer and the user.
 */
static irqreturn_t
ks8695_link_irq(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
	if (ctrl & WMC_WLS) {
		netif_carrier_on(ndev);
		if (netif_msg_link(ksp))
			dev_info(ksp->dev,
				 "%s: Link is now up (10%sMbps/%s-duplex)\n",
				 ndev->name,
				 (ctrl & WMC_WSS) ? "0" : "",
				 (ctrl & WMC_WDS) ? "Full" : "Half");
	} else {
		netif_carrier_off(ndev);
		if (netif_msg_link(ksp))
			dev_info(ksp->dev, "%s: Link is now down.\n",
				 ndev->name);
	}

	return IRQ_HANDLED;
}


/* KS8695 Device functions */

/**
 * ks8695_reset - Reset a KS8695 ethernet interface
 * @ksp: The interface to reset
 *
 * Perform an engine reset of the interface and re-program it
 * with sensible defaults.
 */
static void
ks8695_reset(struct ks8695_priv *ksp)
{
	int reset_timeout = watchdog;
	/* Issue the reset via the TX DMA control register */
	ks8695_writereg(ksp, KS8695_DTXC, DTXC_TRST);
	while (reset_timeout--) {
		if (!(ks8695_readreg(ksp, KS8695_DTXC) & DTXC_TRST))
			break;
		msleep(1);
	}

	if (reset_timeout < 0) {
		dev_crit(ksp->dev,
			 "Timeout waiting for DMA engines to reset\n");
		/* And blithely carry on */
	}

	/* Definitely wait long enough before attempting to program
	 * the engines
	 */
	msleep(10);

	/* RX: unicast and broadcast */
	ks8695_writereg(ksp, KS8695_DRXC, DRXC_RU | DRXC_RB);
	/* TX: pad and add CRC */
	ks8695_writereg(ksp, KS8695_DTXC, DTXC_TEP | DTXC_TAC);
}

/**
 * ks8695_shutdown - Shut down a KS8695 ethernet interface
 * @ksp: The interface to shut down
 *
 * This disables packet RX/TX, cleans up IRQs, drains the rings,
 * and basically places the interface into a clean shutdown
 * state.
 */
static void
ks8695_shutdown(struct ks8695_priv *ksp)
{
	u32 ctrl;
	int buff_n;

	/* Disable packet transmission */
	ctrl = ks8695_readreg(ksp, KS8695_DTXC);
	ks8695_writereg(ksp, KS8695_DTXC, ctrl & ~DTXC_TE);

	/* Disable packet reception */
	ctrl = ks8695_readreg(ksp, KS8695_DRXC);
	ks8695_writereg(ksp, KS8695_DRXC, ctrl & ~DRXC_RE);

	/* Release the IRQs */
	free_irq(ksp->rx_irq, ksp->ndev);
	free_irq(ksp->tx_irq, ksp->ndev);
	if (ksp->link_irq != -1)
		free_irq(ksp->link_irq, ksp->ndev);

	/* Throw away any pending TX packets */
	for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
		if (ksp->tx_buffers[buff_n].skb) {
			/* Remove this SKB from the TX ring */
			ksp->tx_ring[buff_n].owner = 0;
			ksp->tx_ring[buff_n].status = 0;
			ksp->tx_ring[buff_n].data_ptr = 0;

			/* Unmap and bin this SKB */
			dma_unmap_single(ksp->dev,
					 ksp->tx_buffers[buff_n].dma_ptr,
					 ksp->tx_buffers[buff_n].length,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
			ksp->tx_buffers[buff_n].skb = NULL;
		}
	}

	/* Purge the RX buffers */
	for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
		if (ksp->rx_buffers[buff_n].skb) {
			/* Remove the SKB from the RX ring */
			ksp->rx_ring[buff_n].status = 0;
			ksp->rx_ring[buff_n].data_ptr = 0;

			/* Unmap and bin the SKB */
			dma_unmap_single(ksp->dev,
					 ksp->rx_buffers[buff_n].dma_ptr,
					 ksp->rx_buffers[buff_n].length,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_irq(ksp->rx_buffers[buff_n].skb);
			ksp->rx_buffers[buff_n].skb = NULL;
		}
	}
}


/**
 * ks8695_setup_irq - IRQ setup helper function
 * @irq: The IRQ number to claim
 * @irq_name: The name to give the IRQ claimant
 * @handler: The function to call to handle the IRQ
 * @ndev: The net_device to pass in as the dev_id argument to the handler
 *
 * Return 0 on success.
 */
static int
ks8695_setup_irq(int irq, const char *irq_name,
		 irq_handler_t handler, struct net_device *ndev)
{
	int ret;

	ret = request_irq(irq, handler, IRQF_SHARED, irq_name, ndev);

	if (ret) {
		dev_err(&ndev->dev, "failure to request IRQ %d\n", irq);
		return ret;
	}

	return 0;
}

/**
 * ks8695_init_net - Initialise a KS8695 ethernet interface
 * @ksp: The interface to initialise
 *
 * This routine fills the RX ring, initialises the DMA engines,
 * allocates the IRQs and then starts the packet TX and RX
 * engines.
 */
static int
ks8695_init_net(struct ks8695_priv *ksp)
{
	int ret;
	u32 ctrl;

	ks8695_refill_rxbuffers(ksp);

	/* Initialise the DMA engines */
	ks8695_writereg(ksp, KS8695_RDLB, (u32) ksp->rx_ring_dma);
	ks8695_writereg(ksp, KS8695_TDLB, (u32) ksp->tx_ring_dma);

	/* Request the IRQs */
	ret = ks8695_setup_irq(ksp->rx_irq, ksp->rx_irq_name,
			       ks8695_rx_irq, ksp->ndev);
	if (ret)
		return ret;
	ret = ks8695_setup_irq(ksp->tx_irq, ksp->tx_irq_name,
			       ks8695_tx_irq, ksp->ndev);
	if (ret)
		return ret;
	if (ksp->link_irq != -1) {
		ret = ks8695_setup_irq(ksp->link_irq, ksp->link_irq_name,
				       ks8695_link_irq, ksp->ndev);
		if (ret)
			return ret;
	}

	/* Set up the ring indices */
	ksp->next_rx_desc_read = 0;
	ksp->tx_ring_next_slot = 0;
	ksp->tx_ring_used = 0;

	/* Bring up transmission */
	ctrl = ks8695_readreg(ksp, KS8695_DTXC);
	/* Enable packet transmission */
	ks8695_writereg(ksp, KS8695_DTXC, ctrl | DTXC_TE);

	/* Bring up the reception */
	ctrl = ks8695_readreg(ksp, KS8695_DRXC);
	/* Enable packet reception */
	ks8695_writereg(ksp, KS8695_DRXC, ctrl | DRXC_RE);
	/* And start the DMA engine */
	ks8695_writereg(ksp, KS8695_DRSC, 0);

	/* All done */
	return 0;
}

/**
 * ks8695_release_device - HW resource release for KS8695 e-net
 * @ksp: The device to be freed
 *
 * This unallocates io memory regions, dma-coherent regions etc
 * which were allocated in ks8695_probe.
 */
static void
ks8695_release_device(struct ks8695_priv *ksp)
{
	/* Unmap the registers */
	iounmap(ksp->io_regs);
	if (ksp->phyiface_regs)
		iounmap(ksp->phyiface_regs);

	/* And release the request */
	release_resource(ksp->regs_req);
	kfree(ksp->regs_req);
	if (ksp->phyiface_req) {
		release_resource(ksp->phyiface_req);
		kfree(ksp->phyiface_req);
	}

	/* Free the ring buffers */
	dma_free_coherent(ksp->dev, RING_DMA_SIZE,
			  ksp->ring_base, ksp->ring_base_dma);
}

/* Ethtool support */

/**
 * ks8695_get_msglevel - Get the messages enabled for emission
 * @ndev: The network device to read from
 */
static u32
ks8695_get_msglevel(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);

	return ksp->msg_enable;
}

/**
 * ks8695_set_msglevel - Set the messages enabled for emission
 * @ndev: The network device to configure
 * @value: The messages to set for emission
 */
static void
ks8695_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);

	ksp->msg_enable = value;
}

/**
 * ks8695_get_settings - Get device-specific settings.
 * @ndev: The network device to read settings from
 * @cmd: The ethtool structure to read into
 */
static int
ks8695_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	/* All ports on the KS8695 support these... */
	cmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
			  SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
			  SUPPORTED_TP | SUPPORTED_MII);
	cmd->transceiver = XCVR_INTERNAL;

	/* Port specific extras */
	switch (ksp->dtype) {
	case KS8695_DTYPE_HPNA:
		cmd->phy_address = 0;
		/* not supported for HPNA */
		cmd->autoneg = AUTONEG_DISABLE;

		/* BUG: Erm, dtype hpna implies no phy regs */
		/*
		ctrl = readl(KS8695_MISC_VA + KS8695_HMC);
		cmd->speed = (ctrl & HMC_HSS) ? SPEED_100 : SPEED_10;
		cmd->duplex = (ctrl & HMC_HDS) ? DUPLEX_FULL : DUPLEX_HALF;
		*/
		return -EOPNOTSUPP;
	case KS8695_DTYPE_WAN:
		cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
		cmd->port = PORT_MII;
		cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
		cmd->phy_address = 0;

		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
		if ((ctrl & WMC_WAND) == 0) {
			/* auto-negotiation is enabled */
			cmd->advertising |= ADVERTISED_Autoneg;
			if (ctrl & WMC_WANA100F)
				cmd->advertising |= ADVERTISED_100baseT_Full;
			if (ctrl & WMC_WANA100H)
				cmd->advertising |= ADVERTISED_100baseT_Half;
			if (ctrl & WMC_WANA10F)
				cmd->advertising |= ADVERTISED_10baseT_Full;
			if (ctrl & WMC_WANA10H)
				cmd->advertising |= ADVERTISED_10baseT_Half;
			if (ctrl & WMC_WANAP)
				cmd->advertising |= ADVERTISED_Pause;
			cmd->autoneg = AUTONEG_ENABLE;

			cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
			cmd->duplex = (ctrl & WMC_WDS) ?
				DUPLEX_FULL : DUPLEX_HALF;
		} else {
			/* auto-negotiation is disabled */
			cmd->autoneg = AUTONEG_DISABLE;

			cmd->speed = (ctrl & WMC_WANF100) ?
				SPEED_100 : SPEED_10;
			cmd->duplex = (ctrl & WMC_WANFF) ?
				DUPLEX_FULL : DUPLEX_HALF;
		}
		break;
	case KS8695_DTYPE_LAN:
		return -EOPNOTSUPP;
	}

	return 0;
}

/**
 * ks8695_set_settings - Set device-specific settings.
 * @ndev: The network device to configure
 * @cmd: The settings to configure
 */
static int
ks8695_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	if ((cmd->speed != SPEED_10) && (cmd->speed != SPEED_100))
		return -EINVAL;
	if ((cmd->duplex != DUPLEX_HALF) && (cmd->duplex != DUPLEX_FULL))
		return -EINVAL;
	if (cmd->port != PORT_MII)
		return -EINVAL;
	if (cmd->transceiver != XCVR_INTERNAL)
		return -EINVAL;
	if ((cmd->autoneg != AUTONEG_DISABLE) &&
	    (cmd->autoneg != AUTONEG_ENABLE))
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if ((cmd->advertising & (ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full)) == 0)
			return -EINVAL;

		switch (ksp->dtype) {
		case KS8695_DTYPE_HPNA:
			/* HPNA does not support auto-negotiation. */
			return -EINVAL;
		case KS8695_DTYPE_WAN:
			ctrl = readl(ksp->phyiface_regs + KS8695_WMC);

			ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
				  WMC_WANA10F | WMC_WANA10H);
			if (cmd->advertising & ADVERTISED_100baseT_Full)
				ctrl |= WMC_WANA100F;
			if (cmd->advertising & ADVERTISED_100baseT_Half)
				ctrl |= WMC_WANA100H;
			if (cmd->advertising & ADVERTISED_10baseT_Full)
				ctrl |= WMC_WANA10F;
			if (cmd->advertising & ADVERTISED_10baseT_Half)
				ctrl |= WMC_WANA10H;

			/* force a re-negotiation */
			ctrl |= WMC_WANR;
			writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
			break;
		case KS8695_DTYPE_LAN:
			return -EOPNOTSUPP;
		}

	} else {
		switch (ksp->dtype) {
		case KS8695_DTYPE_HPNA:
			/* BUG: dtype_hpna implies no phy registers */
			/*
			ctrl = __raw_readl(KS8695_MISC_VA + KS8695_HMC);

			ctrl &= ~(HMC_HSS | HMC_HDS);
			if (cmd->speed == SPEED_100)
				ctrl |= HMC_HSS;
			if (cmd->duplex == DUPLEX_FULL)
				ctrl |= HMC_HDS;

			__raw_writel(ctrl, KS8695_MISC_VA + KS8695_HMC);
			*/
			return -EOPNOTSUPP;
		case KS8695_DTYPE_WAN:
			ctrl = readl(ksp->phyiface_regs + KS8695_WMC);

			/* disable auto-negotiation */
			ctrl |= WMC_WAND;
			ctrl &= ~(WMC_WANF100 | WMC_WANFF);

			if (cmd->speed == SPEED_100)
				ctrl |= WMC_WANF100;
			if (cmd->duplex == DUPLEX_FULL)
				ctrl |= WMC_WANFF;

			writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
			break;
		case KS8695_DTYPE_LAN:
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

/**
 * ks8695_nwayreset - Restart the autonegotiation on the port.
 * @ndev: The network device to restart autonegotiation on
 */
static int
ks8695_nwayreset(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	switch (ksp->dtype) {
	case KS8695_DTYPE_HPNA:
		/* No phy means no autonegotiation on hpna */
		return -EINVAL;
	case KS8695_DTYPE_WAN:
		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);

		if ((ctrl & WMC_WAND) == 0)
			writel(ctrl | WMC_WANR,
			       ksp->phyiface_regs + KS8695_WMC);
		else
			/* auto-negotiation not enabled */
			return -EINVAL;
		break;
	case KS8695_DTYPE_LAN:
		return -EOPNOTSUPP;
	}

	return 0;
}

/**
 * ks8695_get_link - Retrieve link status of network interface
 * @ndev: The network interface to retrieve the link status of.
 */
static u32
ks8695_get_link(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	switch (ksp->dtype) {
	case KS8695_DTYPE_HPNA:
		/* HPNA always has link */
		return 1;
	case KS8695_DTYPE_WAN:
		/* WAN we can read the PHY for */
		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
		return ctrl & WMC_WLS;
	case KS8695_DTYPE_LAN:
		return -EOPNOTSUPP;
	}
	return 0;
}

/**
 * ks8695_get_pause - Retrieve network pause/flow-control advertising
 * @ndev: The device to retrieve settings from
 * @param: The structure to fill out with the information
 */
static void
ks8695_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	switch (ksp->dtype) {
	case KS8695_DTYPE_HPNA:
		/* No phy link on hpna to configure */
		return;
	case KS8695_DTYPE_WAN:
		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);

		/* advertise Pause */
		param->autoneg = (ctrl & WMC_WANAP);

		/* current Rx Flow-control */
		ctrl = ks8695_readreg(ksp, KS8695_DRXC);
		param->rx_pause = (ctrl & DRXC_RFCE);

		/* current Tx Flow-control */
		ctrl = ks8695_readreg(ksp, KS8695_DTXC);
		param->tx_pause = (ctrl & DTXC_TFCE);
		break;
	case KS8695_DTYPE_LAN:
		/* The LAN's "phy" is a direct-attached switch */
		return;
	}
}

/**
 * ks8695_set_pause - Configure pause/flow-control
 * @ndev: The device to configure
 * @param: The pause parameters to set
 *
 * TODO: Implement this
 */
static int
ks8695_set_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
{
	return -EOPNOTSUPP;
}

/**
 * ks8695_get_drvinfo - Retrieve driver information
 * @ndev: The network device to retrieve info about
 * @info: The info structure to fill out.
 */
static void
ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, MODULENAME, sizeof(info->driver));
	strlcpy(info->version, MODULEVERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(ndev->dev.parent),
		sizeof(info->bus_info));
}

static const struct ethtool_ops ks8695_ethtool_ops = {
	.get_msglevel	= ks8695_get_msglevel,
	.set_msglevel	= ks8695_set_msglevel,
	.get_settings	= ks8695_get_settings,
	.set_settings	= ks8695_set_settings,
	.nway_reset	= ks8695_nwayreset,
	.get_link	= ks8695_get_link,
	.get_pauseparam = ks8695_get_pause,
	.set_pauseparam = ks8695_set_pause,
	.get_drvinfo	= ks8695_get_drvinfo,
};

/* Network device interface functions */

/**
 * ks8695_set_mac - Update MAC in net dev and HW
 * @ndev: The network device to update
 * @addr: The new MAC address to set
 */
static int
ks8695_set_mac(struct net_device *ndev, void *addr)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	struct sockaddr *address = addr;

	if (!is_valid_ether_addr(address->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);

	ks8695_update_mac(ksp);

	dev_dbg(ksp->dev, "%s: Updated MAC address to %pM\n",
		ndev->name, ndev->dev_addr);

	return 0;
}

/**
 * ks8695_set_multicast - Set up the multicast behaviour of the interface
 * @ndev: The net_device to configure
 *
 * This routine, called by the net layer, configures promiscuity
 * and multicast reception behaviour for the interface.
 */
static void
ks8695_set_multicast(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	ctrl = ks8695_readreg(ksp, KS8695_DRXC);

	if (ndev->flags & IFF_PROMISC) {
		/* enable promiscuous mode */
		ctrl |= DRXC_RA;
	} else if (ndev->flags & ~IFF_PROMISC) {
		/* disable promiscuous mode */
		ctrl &= ~DRXC_RA;
	}

	if (ndev->flags & IFF_ALLMULTI) {
		/* enable all multicast mode */
		ctrl |= DRXC_RM;
	} else if (ndev->mc_count > KS8695_NR_ADDRESSES) {
		/* more specific multicast addresses than can be
		 * handled in hardware
		 */
		ctrl |= DRXC_RM;
	} else {
		/* enable specific multicasts */
		ctrl &= ~DRXC_RM;
		ks8695_init_partial_multicast(ksp, ndev->mc_list,
					      ndev->mc_count);
	}

	ks8695_writereg(ksp, KS8695_DRXC, ctrl);
}

/**
 * ks8695_timeout - Handle a network tx/rx timeout.
 * @ndev: The net_device which timed out.
 *
 * A network transaction timed out, reset the device.
 */
static void
ks8695_timeout(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);

	netif_stop_queue(ndev);
	ks8695_shutdown(ksp);

	ks8695_reset(ksp);

	ks8695_update_mac(ksp);

	/* We ignore the return from this since it managed to init
	 * before, so it will probably be okay to init again.
	 */
	ks8695_init_net(ksp);

	/* Reconfigure promiscuity etc */
	ks8695_set_multicast(ndev);

	/* And start the TX queue once more */
	netif_start_queue(ndev);
}

/**
 * ks8695_start_xmit - Start a packet transmission
 * @skb: The packet to transmit
 * @ndev: The network device to send the packet on
 *
 * This routine, called by the net layer, takes ownership of the
 * sk_buff and adds it to the TX ring. It then kicks the TX DMA
 * engine to ensure transmission begins.
 */
static int
ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	int buff_n;
	dma_addr_t dmap;

	spin_lock_irq(&ksp->txq_lock);

	if (ksp->tx_ring_used == MAX_TX_DESC) {
		/* Somehow we got entered when we have no room */
		spin_unlock_irq(&ksp->txq_lock);
		return NETDEV_TX_BUSY;
	}

	buff_n = ksp->tx_ring_next_slot;

	BUG_ON(ksp->tx_buffers[buff_n].skb);

	dmap = dma_map_single(ksp->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(ksp->dev, dmap))) {
		/* Failed to DMA map this SKB, give it back for now */
		spin_unlock_irq(&ksp->txq_lock);
		dev_dbg(ksp->dev, "%s: Could not map DMA memory for "
			"transmission, trying later\n", ndev->name);
		return NETDEV_TX_BUSY;
	}

	ksp->tx_buffers[buff_n].dma_ptr = dmap;
	/* Mapped okay, store the buffer pointer and length for later */
	ksp->tx_buffers[buff_n].skb = skb;
	ksp->tx_buffers[buff_n].length = skb->len;

	/* Fill out the TX descriptor */
	ksp->tx_ring[buff_n].data_ptr =
		cpu_to_le32(ksp->tx_buffers[buff_n].dma_ptr);
	ksp->tx_ring[buff_n].status =
		cpu_to_le32(TDES_IC | TDES_FS | TDES_LS |
			    (skb->len & TDES_TBS));

	wmb();

	/* Hand it over to the hardware */
	ksp->tx_ring[buff_n].owner = cpu_to_le32(TDES_OWN);

	if (++ksp->tx_ring_used == MAX_TX_DESC)
		netif_stop_queue(ndev);

	ndev->trans_start = jiffies;

	/* Kick the TX DMA in case it decided to go IDLE */
	ks8695_writereg(ksp, KS8695_DTSC, 0);

	/* And update the next ring slot */
	ksp->tx_ring_next_slot = (buff_n + 1) & MAX_TX_DESC_MASK;

	spin_unlock_irq(&ksp->txq_lock);
	return NETDEV_TX_OK;
}

/**
 * ks8695_stop - Stop (shutdown) a KS8695 ethernet interface
 * @ndev: The net_device to stop
 *
 * This disables the TX queue and cleans up a KS8695 ethernet
 * device.
 */
static int
ks8695_stop(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&ksp->napi);
	netif_carrier_off(ndev);

	ks8695_shutdown(ksp);

	return 0;
}

/**
 * ks8695_open - Open (bring up) a KS8695 ethernet interface
 * @ndev: The net_device to open
 *
 * This resets, configures the MAC, initialises the RX ring and
 * DMA engines and starts the TX queue for a KS8695 ethernet
 * device.
 */
static int
ks8695_open(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	int ret;

	if (!is_valid_ether_addr(ndev->dev_addr))
		return -EADDRNOTAVAIL;

	ks8695_reset(ksp);

	ks8695_update_mac(ksp);

	ret = ks8695_init_net(ksp);
	if (ret) {
		ks8695_shutdown(ksp);
		return ret;
	}

	napi_enable(&ksp->napi);
	netif_start_queue(ndev);

	return 0;
}

/* Platform device driver */

/**
 * ks8695_init_switch - Init LAN switch to known good defaults.
 * @ksp: The device to initialise
 *
 * This initialises the LAN switch in the KS8695 to a known-good
 * set of defaults.
 */
static void __devinit
ks8695_init_switch(struct ks8695_priv *ksp)
{
	u32 ctrl;

	/* Default value for SEC0 according to datasheet */
	ctrl = 0x40819e00;

	/* LED0 = Speed  LED1 = Link/Activity */
	ctrl &= ~(SEC0_LLED1S | SEC0_LLED0S);
	ctrl |= (LLED0S_LINK | LLED1S_LINK_ACTIVITY);

	/* Enable Switch */
	ctrl |= SEC0_ENABLE;

	writel(ctrl, ksp->phyiface_regs + KS8695_SEC0);

	/* Defaults for SEC1 */
	writel(0x9400100, ksp->phyiface_regs + KS8695_SEC1);
}

/**
 * ks8695_init_wan_phy - Initialise the WAN PHY to sensible defaults
 * @ksp: The device to initialise
 *
 * This initialises a KS8695's WAN phy to sensible values for
 * autonegotiation etc.
 */
static void __devinit
ks8695_init_wan_phy(struct ks8695_priv *ksp)
{
	u32 ctrl;

	/* Support auto-negotiation */
	ctrl = (WMC_WANAP | WMC_WANA100F | WMC_WANA100H |
		WMC_WANA10F | WMC_WANA10H);

	/* LED0 = Activity , LED1 = Link */
	ctrl |= (WLED0S_ACTIVITY | WLED1S_LINK);

	/* Restart Auto-negotiation */
	ctrl |= WMC_WANR;

	writel(ctrl, ksp->phyiface_regs + KS8695_WMC);

	writel(0, ksp->phyiface_regs + KS8695_WPPM);
	writel(0, ksp->phyiface_regs + KS8695_PPS);
}

static const struct net_device_ops ks8695_netdev_ops = {
	.ndo_open		= ks8695_open,
	.ndo_stop		= ks8695_stop,
	.ndo_start_xmit		= ks8695_start_xmit,
	.ndo_tx_timeout		= ks8695_timeout,
	.ndo_set_mac_address	= ks8695_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= ks8695_set_multicast,
};

/**
 * ks8695_probe - Probe and initialise a KS8695 ethernet interface
 * @pdev: The platform device to probe
 *
 * Initialise a KS8695 ethernet device from platform data.
 *
 * This driver requires at least one IORESOURCE_MEM for the
 * registers and two IORESOURCE_IRQ for the RX and TX IRQs
 * respectively. It can optionally take an additional
 * IORESOURCE_MEM for the switch or phy in the case of the lan or
 * wan ports, and an IORESOURCE_IRQ for the link IRQ for the wan
 * port.
 */
static int __devinit
ks8695_probe(struct platform_device *pdev)
{
	struct ks8695_priv *ksp;
	struct net_device *ndev;
	struct resource *regs_res, *phyiface_res;
	struct resource *rxirq_res, *txirq_res, *linkirq_res;
	int ret = 0;
	int buff_n;
	u32 machigh, maclow;

	/* Initialise a net_device */
	ndev = alloc_etherdev(sizeof(struct ks8695_priv));
	if (!ndev) {
		dev_err(&pdev->dev, "could not allocate device.\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	dev_dbg(&pdev->dev, "ks8695_probe() called\n");

	/* Configure our private structure a little */
	ksp = netdev_priv(ndev);
	memset(ksp, 0, sizeof(struct ks8695_priv));

	ksp->dev = &pdev->dev;
	ksp->ndev = ndev;
	ksp->msg_enable = NETIF_MSG_LINK;

	/* Retrieve resources */
	regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	phyiface_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);

	rxirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	txirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	linkirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 2);

	if (!(regs_res && rxirq_res && txirq_res)) {
		dev_err(ksp->dev, "insufficient resources\n");
		ret = -ENOENT;
		goto failure;
	}

	ksp->regs_req = request_mem_region(regs_res->start,
					   resource_size(regs_res),
					   pdev->name);

	if (!ksp->regs_req) {
		dev_err(ksp->dev, "cannot claim register space\n");
		ret = -EIO;
		goto failure;
	}

	ksp->io_regs = ioremap(regs_res->start, resource_size(regs_res));

	if (!ksp->io_regs) {
		dev_err(ksp->dev, "failed to ioremap registers\n");
		ret = -EINVAL;
		goto failure;
	}

	if (phyiface_res) {
		ksp->phyiface_req =
			request_mem_region(phyiface_res->start,
					   resource_size(phyiface_res),
					   phyiface_res->name);

		if (!ksp->phyiface_req) {
			dev_err(ksp->dev,
				"cannot claim switch register space\n");
			ret = -EIO;
			goto failure;
		}

		ksp->phyiface_regs = ioremap(phyiface_res->start,
					     resource_size(phyiface_res));

		if (!ksp->phyiface_regs) {
			dev_err(ksp->dev,
				"failed to ioremap switch registers\n");
			ret = -EINVAL;
			goto failure;
		}
	}

	ksp->rx_irq = rxirq_res->start;
	ksp->rx_irq_name = rxirq_res->name ? rxirq_res->name : "Ethernet RX";
	ksp->tx_irq = txirq_res->start;
	ksp->tx_irq_name = txirq_res->name ? txirq_res->name : "Ethernet TX";
	ksp->link_irq = (linkirq_res ? linkirq_res->start : -1);
	ksp->link_irq_name = (linkirq_res && linkirq_res->name) ?
			linkirq_res->name : "Ethernet Link";

	/* driver system setup */
	ndev->netdev_ops = &ks8695_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);

	netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT);

	/* Retrieve the default MAC addr from the chip. */
	/* The bootloader should have left it in there for us. */

	machigh = ks8695_readreg(ksp, KS8695_MAH);
	maclow = ks8695_readreg(ksp, KS8695_MAL);

	ndev->dev_addr[0] = (machigh >> 8) & 0xFF;
	ndev->dev_addr[1] = machigh & 0xFF;
	ndev->dev_addr[2] = (maclow >> 24) & 0xFF;
	ndev->dev_addr[3] = (maclow >> 16) & 0xFF;
	ndev->dev_addr[4] = (maclow >> 8) & 0xFF;
	ndev->dev_addr[5] = maclow & 0xFF;

	if (!is_valid_ether_addr(ndev->dev_addr))
		dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please "
			 "set using ifconfig\n", ndev->name);

	/* In order to be efficient memory-wise, we allocate both
	 * rings in one go.
	 */
	ksp->ring_base = dma_alloc_coherent(&pdev->dev, RING_DMA_SIZE,
					    &ksp->ring_base_dma, GFP_KERNEL);
	if (!ksp->ring_base) {
		ret = -ENOMEM;
		goto failure;
	}

	/* Specify the TX DMA ring buffer */
	ksp->tx_ring = ksp->ring_base;
	ksp->tx_ring_dma = ksp->ring_base_dma;

	/* And initialise the queue's lock */
	spin_lock_init(&ksp->txq_lock);
	spin_lock_init(&ksp->rx_lock);

	/* Specify the RX DMA ring buffer */
	ksp->rx_ring = ksp->ring_base + TX_RING_DMA_SIZE;
	ksp->rx_ring_dma = ksp->ring_base_dma + TX_RING_DMA_SIZE;

	/* Zero the descriptor rings */
	memset(ksp->tx_ring, 0, TX_RING_DMA_SIZE);
	memset(ksp->rx_ring, 0, RX_RING_DMA_SIZE);

	/* Build the rings */
	for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
		ksp->tx_ring[buff_n].next_desc =
			cpu_to_le32(ksp->tx_ring_dma +
				    (sizeof(struct tx_ring_desc) *
				     ((buff_n + 1) & MAX_TX_DESC_MASK)));
	}

	for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
		ksp->rx_ring[buff_n].next_desc =
			cpu_to_le32(ksp->rx_ring_dma +
				    (sizeof(struct rx_ring_desc) *
				     ((buff_n + 1) & MAX_RX_DESC_MASK)));
	}

	/* Initialise the port (physically) */
	if (ksp->phyiface_regs && ksp->link_irq == -1) {
		ks8695_init_switch(ksp);
		ksp->dtype = KS8695_DTYPE_LAN;
	} else if (ksp->phyiface_regs && ksp->link_irq != -1) {
		ks8695_init_wan_phy(ksp);
		ksp->dtype = KS8695_DTYPE_WAN;
	} else {
		/* No initialisation since HPNA does not have a PHY */
		ksp->dtype = KS8695_DTYPE_HPNA;
	}

	/* And bring up the net_device with the net core */
	platform_set_drvdata(pdev, ndev);
	ret = register_netdev(ndev);

	if (ret == 0) {
		dev_info(ksp->dev, "ks8695 ethernet (%s) MAC: %pM\n",
			 ks8695_port_type(ksp), ndev->dev_addr);
	} else {
		/* Report the failure to register the net_device */
		dev_err(ksp->dev, "ks8695net: failed to register netdev.\n");
		goto failure;
	}

	/* All is well */
	return 0;

	/* Error exit path */
failure:
	ks8695_release_device(ksp);
	free_netdev(ndev);

	return ret;
}

/**
 * ks8695_drv_suspend - Suspend a KS8695 ethernet platform device.
 * @pdev: The device to suspend
 * @state: The suspend state
 *
 * This routine detaches and shuts down a KS8695 ethernet device.
 */
static int
ks8695_drv_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ks8695_priv *ksp = netdev_priv(ndev);

	ksp->in_suspend = 1;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		ks8695_shutdown(ksp);
	}

	return 0;
}

/**
 * ks8695_drv_resume - Resume a KS8695 ethernet platform device.
 * @pdev: The device to resume
 *
 * This routine re-initialises and re-attaches a KS8695 ethernet
 * device.
 */
static int
ks8695_drv_resume(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ks8695_priv *ksp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		ks8695_reset(ksp);
		ks8695_init_net(ksp);
		ks8695_set_multicast(ndev);
		netif_device_attach(ndev);
	}

	ksp->in_suspend = 0;

	return 0;
}

/**
 * ks8695_drv_remove - Remove a KS8695 net device on driver unload.
 * @pdev: The platform device to remove
 *
 * This unregisters and releases a KS8695 ethernet device.
 */
static int __devexit
ks8695_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ks8695_priv *ksp = netdev_priv(ndev);

	platform_set_drvdata(pdev, NULL);
	netif_napi_del(&ksp->napi);

	unregister_netdev(ndev);
	ks8695_release_device(ksp);
	free_netdev(ndev);

	dev_dbg(&pdev->dev, "released and freed device\n");
	return 0;
}

static struct platform_driver ks8695_driver = {
	.driver = {
		.name	= MODULENAME,
		.owner	= THIS_MODULE,
	},
	.probe		= ks8695_probe,
	.remove		= __devexit_p(ks8695_drv_remove),
	.suspend	= ks8695_drv_suspend,
	.resume		= ks8695_drv_resume,
};

/* Module interface */

static int __init
ks8695_init(void)
{
	printk(KERN_INFO "%s Ethernet driver, V%s\n",
	       MODULENAME, MODULEVERSION);

	return platform_driver_register(&ks8695_driver);
}

static void __exit
ks8695_cleanup(void)
{
	platform_driver_unregister(&ks8695_driver);
}

module_init(ks8695_init);
module_exit(ks8695_cleanup);

MODULE_AUTHOR("Simtec Electronics");
MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MODULENAME);

module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");