1/**
2 * drivers/net/ks8851_mll.c
3 * Copyright (c) 2009 Micrel Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19/**
20 * Supports:
21 * KS8851 16bit MLL chip from Micrel Inc.
22 */
23
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/ethtool.h>
29#include <linux/cache.h>
30#include <linux/crc32.h>
31#include <linux/mii.h>
32#include <linux/platform_device.h>
33#include <linux/delay.h>
34#include <linux/slab.h>
35
36#define DRV_NAME "ks8851_mll"
37
38static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
39#define MAX_RECV_FRAMES 32
40#define MAX_BUF_SIZE 2048
41#define TX_BUF_SIZE 2000
42#define RX_BUF_SIZE 2000
43
44#define KS_CCR 0x08
45#define CCR_EEPROM (1 << 9)
46#define CCR_SPI (1 << 8)
47#define CCR_8BIT (1 << 7)
48#define CCR_16BIT (1 << 6)
49#define CCR_32BIT (1 << 5)
50#define CCR_SHARED (1 << 4)
51#define CCR_32PIN (1 << 0)
52
53/* MAC address registers */
54#define KS_MARL 0x10
55#define KS_MARM 0x12
56#define KS_MARH 0x14
57
58#define KS_OBCR 0x20
59#define OBCR_ODS_16MA (1 << 6)
60
61#define KS_EEPCR 0x22
62#define EEPCR_EESA (1 << 4)
63#define EEPCR_EESB (1 << 3)
64#define EEPCR_EEDO (1 << 2)
65#define EEPCR_EESCK (1 << 1)
66#define EEPCR_EECS (1 << 0)
67
68#define KS_MBIR 0x24
69#define MBIR_TXMBF (1 << 12)
70#define MBIR_TXMBFA (1 << 11)
71#define MBIR_RXMBF (1 << 4)
72#define MBIR_RXMBFA (1 << 3)
73
74#define KS_GRR 0x26
75#define GRR_QMU (1 << 1)
76#define GRR_GSR (1 << 0)
77
78#define KS_WFCR 0x2A
79#define WFCR_MPRXE (1 << 7)
80#define WFCR_WF3E (1 << 3)
81#define WFCR_WF2E (1 << 2)
82#define WFCR_WF1E (1 << 1)
83#define WFCR_WF0E (1 << 0)
84
85#define KS_WF0CRC0 0x30
86#define KS_WF0CRC1 0x32
87#define KS_WF0BM0 0x34
88#define KS_WF0BM1 0x36
89#define KS_WF0BM2 0x38
90#define KS_WF0BM3 0x3A
91
92#define KS_WF1CRC0 0x40
93#define KS_WF1CRC1 0x42
94#define KS_WF1BM0 0x44
95#define KS_WF1BM1 0x46
96#define KS_WF1BM2 0x48
97#define KS_WF1BM3 0x4A
98
99#define KS_WF2CRC0 0x50
100#define KS_WF2CRC1 0x52
101#define KS_WF2BM0 0x54
102#define KS_WF2BM1 0x56
103#define KS_WF2BM2 0x58
104#define KS_WF2BM3 0x5A
105
106#define KS_WF3CRC0 0x60
107#define KS_WF3CRC1 0x62
108#define KS_WF3BM0 0x64
109#define KS_WF3BM1 0x66
110#define KS_WF3BM2 0x68
111#define KS_WF3BM3 0x6A
112
113#define KS_TXCR 0x70
114#define TXCR_TCGICMP (1 << 8)
115#define TXCR_TCGUDP (1 << 7)
116#define TXCR_TCGTCP (1 << 6)
117#define TXCR_TCGIP (1 << 5)
118#define TXCR_FTXQ (1 << 4)
119#define TXCR_TXFCE (1 << 3)
120#define TXCR_TXPE (1 << 2)
121#define TXCR_TXCRC (1 << 1)
122#define TXCR_TXE (1 << 0)
123
124#define KS_TXSR 0x72
125#define TXSR_TXLC (1 << 13)
126#define TXSR_TXMC (1 << 12)
127#define TXSR_TXFID_MASK (0x3f << 0)
128#define TXSR_TXFID_SHIFT (0)
129#define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f)
130
131
132#define KS_RXCR1 0x74
133#define RXCR1_FRXQ (1 << 15)
134#define RXCR1_RXUDPFCC (1 << 14)
135#define RXCR1_RXTCPFCC (1 << 13)
136#define RXCR1_RXIPFCC (1 << 12)
137#define RXCR1_RXPAFMA (1 << 11)
138#define RXCR1_RXFCE (1 << 10)
139#define RXCR1_RXEFE (1 << 9)
140#define RXCR1_RXMAFMA (1 << 8)
141#define RXCR1_RXBE (1 << 7)
142#define RXCR1_RXME (1 << 6)
143#define RXCR1_RXUE (1 << 5)
144#define RXCR1_RXAE (1 << 4)
145#define RXCR1_RXINVF (1 << 1)
146#define RXCR1_RXE (1 << 0)
147#define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \
148 RXCR1_RXMAFMA | RXCR1_RXPAFMA)
149
150#define KS_RXCR2 0x76
151#define RXCR2_SRDBL_MASK (0x7 << 5)
152#define RXCR2_SRDBL_SHIFT (5)
153#define RXCR2_SRDBL_4B (0x0 << 5)
154#define RXCR2_SRDBL_8B (0x1 << 5)
155#define RXCR2_SRDBL_16B (0x2 << 5)
156#define RXCR2_SRDBL_32B (0x3 << 5)
157/* #define RXCR2_SRDBL_FRAME (0x4 << 5) */
158#define RXCR2_IUFFP (1 << 4)
159#define RXCR2_RXIUFCEZ (1 << 3)
160#define RXCR2_UDPLFE (1 << 2)
161#define RXCR2_RXICMPFCC (1 << 1)
162#define RXCR2_RXSAF (1 << 0)
163
164#define KS_TXMIR 0x78
165
166#define KS_RXFHSR 0x7C
167#define RXFSHR_RXFV (1 << 15)
168#define RXFSHR_RXICMPFCS (1 << 13)
169#define RXFSHR_RXIPFCS (1 << 12)
170#define RXFSHR_RXTCPFCS (1 << 11)
171#define RXFSHR_RXUDPFCS (1 << 10)
172#define RXFSHR_RXBF (1 << 7)
173#define RXFSHR_RXMF (1 << 6)
174#define RXFSHR_RXUF (1 << 5)
175#define RXFSHR_RXMR (1 << 4)
176#define RXFSHR_RXFT (1 << 3)
177#define RXFSHR_RXFTL (1 << 2)
178#define RXFSHR_RXRF (1 << 1)
179#define RXFSHR_RXCE (1 << 0)
180#define RXFSHR_ERR (RXFSHR_RXCE | RXFSHR_RXRF |\
181 RXFSHR_RXFTL | RXFSHR_RXMR |\
182 RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
183 RXFSHR_RXTCPFCS)
184#define KS_RXFHBCR 0x7E
185#define RXFHBCR_CNT_MASK 0x0FFF
186
187#define KS_TXQCR 0x80
188#define TXQCR_AETFE (1 << 2)
189#define TXQCR_TXQMAM (1 << 1)
190#define TXQCR_METFE (1 << 0)
191
192#define KS_RXQCR 0x82
193#define RXQCR_RXDTTS (1 << 12)
194#define RXQCR_RXDBCTS (1 << 11)
195#define RXQCR_RXFCTS (1 << 10)
196#define RXQCR_RXIPHTOE (1 << 9)
197#define RXQCR_RXDTTE (1 << 7)
198#define RXQCR_RXDBCTE (1 << 6)
199#define RXQCR_RXFCTE (1 << 5)
200#define RXQCR_ADRFE (1 << 4)
201#define RXQCR_SDA (1 << 3)
202#define RXQCR_RRXEF (1 << 0)
203#define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE)
204
205#define KS_TXFDPR 0x84
206#define TXFDPR_TXFPAI (1 << 14)
207#define TXFDPR_TXFP_MASK (0x7ff << 0)
208#define TXFDPR_TXFP_SHIFT (0)
209
210#define KS_RXFDPR 0x86
211#define RXFDPR_RXFPAI (1 << 14)
212
213#define KS_RXDTTR 0x8C
214#define KS_RXDBCTR 0x8E
215
216#define KS_IER 0x90
217#define KS_ISR 0x92
218#define IRQ_LCI (1 << 15)
219#define IRQ_TXI (1 << 14)
220#define IRQ_RXI (1 << 13)
221#define IRQ_RXOI (1 << 11)
222#define IRQ_TXPSI (1 << 9)
223#define IRQ_RXPSI (1 << 8)
224#define IRQ_TXSAI (1 << 6)
225#define IRQ_RXWFDI (1 << 5)
226#define IRQ_RXMPDI (1 << 4)
227#define IRQ_LDI (1 << 3)
228#define IRQ_EDI (1 << 2)
229#define IRQ_SPIBEI (1 << 1)
230#define IRQ_DEDI (1 << 0)
231
232#define KS_RXFCTR 0x9C
233#define RXFCTR_THRESHOLD_MASK 0x00FF
234
235#define KS_RXFC 0x9D
236#define RXFCTR_RXFC_MASK (0xff << 8)
237#define RXFCTR_RXFC_SHIFT (8)
238#define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff)
239#define RXFCTR_RXFCT_MASK (0xff << 0)
240#define RXFCTR_RXFCT_SHIFT (0)
241
242#define KS_TXNTFSR 0x9E
243
244#define KS_MAHTR0 0xA0
245#define KS_MAHTR1 0xA2
246#define KS_MAHTR2 0xA4
247#define KS_MAHTR3 0xA6
248
249#define KS_FCLWR 0xB0
250#define KS_FCHWR 0xB2
251#define KS_FCOWR 0xB4
252
253#define KS_CIDER 0xC0
254#define CIDER_ID 0x8870
255#define CIDER_REV_MASK (0x7 << 1)
256#define CIDER_REV_SHIFT (1)
257#define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7)
258
259#define KS_CGCR 0xC6
260#define KS_IACR 0xC8
261#define IACR_RDEN (1 << 12)
262#define IACR_TSEL_MASK (0x3 << 10)
263#define IACR_TSEL_SHIFT (10)
264#define IACR_TSEL_MIB (0x3 << 10)
265#define IACR_ADDR_MASK (0x1f << 0)
266#define IACR_ADDR_SHIFT (0)
267
268#define KS_IADLR 0xD0
269#define KS_IAHDR 0xD2
270
271#define KS_PMECR 0xD4
272#define PMECR_PME_DELAY (1 << 14)
273#define PMECR_PME_POL (1 << 12)
274#define PMECR_WOL_WAKEUP (1 << 11)
275#define PMECR_WOL_MAGICPKT (1 << 10)
276#define PMECR_WOL_LINKUP (1 << 9)
277#define PMECR_WOL_ENERGY (1 << 8)
278#define PMECR_AUTO_WAKE_EN (1 << 7)
279#define PMECR_WAKEUP_NORMAL (1 << 6)
280#define PMECR_WKEVT_MASK (0xf << 2)
281#define PMECR_WKEVT_SHIFT (2)
282#define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf)
283#define PMECR_WKEVT_ENERGY (0x1 << 2)
284#define PMECR_WKEVT_LINK (0x2 << 2)
285#define PMECR_WKEVT_MAGICPKT (0x4 << 2)
286#define PMECR_WKEVT_FRAME (0x8 << 2)
287#define PMECR_PM_MASK (0x3 << 0)
288#define PMECR_PM_SHIFT (0)
289#define PMECR_PM_NORMAL (0x0 << 0)
290#define PMECR_PM_ENERGY (0x1 << 0)
291#define PMECR_PM_SOFTDOWN (0x2 << 0)
292#define PMECR_PM_POWERSAVE (0x3 << 0)
293
294/* Standard MII PHY data */
295#define KS_P1MBCR 0xE4
296#define P1MBCR_FORCE_FDX (1 << 8)
297
298#define KS_P1MBSR 0xE6
299#define P1MBSR_AN_COMPLETE (1 << 5)
300#define P1MBSR_AN_CAPABLE (1 << 3)
301#define P1MBSR_LINK_UP (1 << 2)
302
303#define KS_PHY1ILR 0xE8
304#define KS_PHY1IHR 0xEA
305#define KS_P1ANAR 0xEC
306#define KS_P1ANLPR 0xEE
307
308#define KS_P1SCLMD 0xF4
309#define P1SCLMD_LEDOFF (1 << 15)
310#define P1SCLMD_TXIDS (1 << 14)
311#define P1SCLMD_RESTARTAN (1 << 13)
312#define P1SCLMD_DISAUTOMDIX (1 << 10)
313#define P1SCLMD_FORCEMDIX (1 << 9)
314#define P1SCLMD_AUTONEGEN (1 << 7)
315#define P1SCLMD_FORCE100 (1 << 6)
316#define P1SCLMD_FORCEFDX (1 << 5)
317#define P1SCLMD_ADV_FLOW (1 << 4)
318#define P1SCLMD_ADV_100BT_FDX (1 << 3)
319#define P1SCLMD_ADV_100BT_HDX (1 << 2)
320#define P1SCLMD_ADV_10BT_FDX (1 << 1)
321#define P1SCLMD_ADV_10BT_HDX (1 << 0)
322
323#define KS_P1CR 0xF6
324#define P1CR_HP_MDIX (1 << 15)
325#define P1CR_REV_POL (1 << 13)
326#define P1CR_OP_100M (1 << 10)
327#define P1CR_OP_FDX (1 << 9)
328#define P1CR_OP_MDI (1 << 7)
329#define P1CR_AN_DONE (1 << 6)
330#define P1CR_LINK_GOOD (1 << 5)
331#define P1CR_PNTR_FLOW (1 << 4)
332#define P1CR_PNTR_100BT_FDX (1 << 3)
333#define P1CR_PNTR_100BT_HDX (1 << 2)
334#define P1CR_PNTR_10BT_FDX (1 << 1)
335#define P1CR_PNTR_10BT_HDX (1 << 0)
336
337/* TX Frame control */
338
339#define TXFR_TXIC (1 << 15)
340#define TXFR_TXFID_MASK (0x3f << 0)
341#define TXFR_TXFID_SHIFT (0)
342
343#define KS_P1SR 0xF8
344#define P1SR_HP_MDIX (1 << 15)
345#define P1SR_REV_POL (1 << 13)
346#define P1SR_OP_100M (1 << 10)
347#define P1SR_OP_FDX (1 << 9)
348#define P1SR_OP_MDI (1 << 7)
349#define P1SR_AN_DONE (1 << 6)
350#define P1SR_LINK_GOOD (1 << 5)
351#define P1SR_PNTR_FLOW (1 << 4)
352#define P1SR_PNTR_100BT_FDX (1 << 3)
353#define P1SR_PNTR_100BT_HDX (1 << 2)
354#define P1SR_PNTR_10BT_FDX (1 << 1)
355#define P1SR_PNTR_10BT_HDX (1 << 0)
356
357#define ENUM_BUS_NONE 0
358#define ENUM_BUS_8BIT 1
359#define ENUM_BUS_16BIT 2
360#define ENUM_BUS_32BIT 3
361
362#define MAX_MCAST_LST 32
363#define HW_MCAST_SIZE 8
364#define MAC_ADDR_LEN 6
365
366/**
367 * union ks_tx_hdr - tx header data
368 * @txb: The header as bytes
369 * @txw: The header as 16bit, little-endian words
370 *
371 * A dual representation of the tx header data to allow
372 * access to individual bytes, and to allow 16bit accesses
373 * with 16bit alignment.
374 */
375union ks_tx_hdr {
376 u8 txb[4];
377 __le16 txw[2];
378};
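/*
 * Illustrative sketch (derived from ks_write_qmu() below, not a separate
 * hardware requirement): when a packet is queued the header is filled in
 * roughly as
 *
 *	txh.txw[0] = 0;                  control word (frame ID/IRQ unused)
 *	txh.txw[1] = cpu_to_le16(len);   frame byte count
 *
 * and the two little-endian words are written to the TXQ just ahead of
 * the frame data.
 */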
379
380/**
381 * struct ks_net - KS8851 driver private data
382 * @netdev : The network device we're bound to
383 * @hw_addr : start address of data register.
384 * @hw_addr_cmd : start address of command register.
385 * @txh : temporary buffer to save status/length.
386 * @lock : Lock to ensure that the device is not accessed when busy.
387 * @pdev : Pointer to platform device.
388 * @mii : The MII state information for the mii calls.
389 * @frame_head_info : frame header information for multi-pkt rx.
390 * @statelock : Lock on this structure for tx list.
391 * @msg_enable : The message flags controlling driver output (see ethtool).
392 * @frame_cnt : number of frames received.
393 * @bus_width : i/o bus width.
394 * @irq : irq number assigned to this device.
395 * @rc_rxqcr : Cached copy of KS_RXQCR.
396 * @rc_txcr : Cached copy of KS_TXCR.
397 * @rc_ier : Cached copy of KS_IER.
398 * @sharedbus : Multiplex (addr and data bus) mode indicator.
399 * @cmd_reg_cache : command register cached.
400 * @cmd_reg_cache_int : command register cached. Used in the irq handler.
401 * @promiscuous : promiscuous mode indicator.
402 * @all_mcast : all-multicast mode indicator.
403 * @mcast_lst_size : size of multicast list.
404 * @mcast_lst : multicast list.
405 * @mcast_bits : multicast hash filter bits.
406 * @mac_addr : MAC address assigned to this device.
407 * @fid : frame id.
408 * @extra_byte : number of extra bytes prepended to an rx pkt.
409 * @enabled : indicator that the device is enabled.
410 *
411 * The @lock ensures that the chip is protected when certain operations are
412 * in progress. When the read or write packet transfer is in progress, most
413 * of the chip registers are not accessible until the transfer is finished and
414 * the DMA has been de-asserted.
415 *
416 * The @statelock is used to protect information in the structure which may
417 * need to be accessed via several sources, such as the network driver layer
418 * or one of the work queues.
419 *
420 */
421
422/* Receive multiplex framer header info */
423struct type_frame_head {
424 u16 sts; /* Frame status */
425 u16 len; /* Byte count */
426};
427
428struct ks_net {
429 struct net_device *netdev;
430 void __iomem *hw_addr;
431 void __iomem *hw_addr_cmd;
432 union ks_tx_hdr txh ____cacheline_aligned;
433	struct mutex lock; /* mutex protecting chip register access */
434 struct platform_device *pdev;
435 struct mii_if_info mii;
436 struct type_frame_head *frame_head_info;
437 spinlock_t statelock;
438 u32 msg_enable;
439 u32 frame_cnt;
440 int bus_width;
441 int irq;
442
443 u16 rc_rxqcr;
444 u16 rc_txcr;
445 u16 rc_ier;
446 u16 sharedbus;
447 u16 cmd_reg_cache;
448 u16 cmd_reg_cache_int;
449 u16 promiscuous;
450 u16 all_mcast;
451 u16 mcast_lst_size;
452 u8 mcast_lst[MAX_MCAST_LST][MAC_ADDR_LEN];
453 u8 mcast_bits[HW_MCAST_SIZE];
454 u8 mac_addr[6];
455 u8 fid;
456 u8 extra_byte;
457 u8 enabled;
458};
459
460static int msg_enable;
461
462#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
463#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
464#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
465#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)
466
467#define BE3 0x8000 /* Byte Enable 3 */
468#define BE2 0x4000 /* Byte Enable 2 */
469#define BE1 0x2000 /* Byte Enable 1 */
470#define BE0 0x1000 /* Byte Enable 0 */
471
472/**
473 * register read/write calls.
474 *
475 * All these calls issue transactions to access the chip's registers. They
476 * all require that the necessary lock is held to prevent accesses when the
477 * chip is busy transfering packet data (RX/TX FIFO accesses).
478 */
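/*
 * Summary of the accessors below (illustrative only): the command word
 * written to hw_addr_cmd is the register offset with the byte-enable bits
 * set for the lanes being accessed, roughly
 *
 *	cmd = offset | (BE0 << (offset & 0x03));          8-bit access
 *	cmd = offset | ((BE1 | BE0) << (offset & 0x02));  16-bit access
 *
 * as done in ks_rdreg8()/ks_rdreg16() and the write counterparts.
 */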
479
480/**
481 * ks_rdreg8 - read 8 bit register from device
482 * @ks : The chip information
483 * @offset: The register address
484 *
485 * Read a 8bit register from the chip, returning the result
486 */
487static u8 ks_rdreg8(struct ks_net *ks, int offset)
488{
489 u16 data;
490 u8 shift_bit = offset & 0x03;
491 u8 shift_data = (offset & 1) << 3;
492 ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
493 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
494 data = ioread16(ks->hw_addr);
495 return (u8)(data >> shift_data);
496}
497
498/**
499 * ks_rdreg16 - read 16 bit register from device
500 * @ks : The chip information
501 * @offset: The register address
502 *
503 * Read a 16bit register from the chip, returning the result
504 */
505
506static u16 ks_rdreg16(struct ks_net *ks, int offset)
507{
508 ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
509 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
510 return ioread16(ks->hw_addr);
511}
512
513/**
514 * ks_wrreg8 - write 8bit register value to chip
515 * @ks: The chip information
516 * @offset: The register address
517 * @value: The value to write
518 *
519 */
520static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
521{
522 u8 shift_bit = (offset & 0x03);
523 u16 value_write = (u16)(value << ((offset & 1) << 3));
524 ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
525 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
526 iowrite16(value_write, ks->hw_addr);
527}
528
529/**
530 * ks_wrreg16 - write 16bit register value to chip
531 * @ks: The chip information
532 * @offset: The register address
533 * @value: The value to write
534 *
535 */
536
537static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
538{
539 ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
540 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
541 iowrite16(value, ks->hw_addr);
542}
543
544/**
545 * ks_inblk - read a block of data from the QMU. Called after pseudo-DMA mode is enabled.
546 * @ks: The chip state
547 * @wptr: buffer address to save data
548 * @len: length in bytes to read
549 *
550 */
551static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
552{
553 len >>= 1;
554 while (len--)
555 *wptr++ = (u16)ioread16(ks->hw_addr);
556}
557
558/**
559 * ks_outblk - write data to the QMU. Called after pseudo-DMA mode is enabled.
560 * @ks: The chip information
561 * @wptr: buffer address
562 * @len: length in bytes to write
563 *
564 */
565static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
566{
567 len >>= 1;
568 while (len--)
569 iowrite16(*wptr++, ks->hw_addr);
570}
571
572static void ks_disable_int(struct ks_net *ks)
573{
574 ks_wrreg16(ks, KS_IER, 0x0000);
575} /* ks_disable_int */
576
577static void ks_enable_int(struct ks_net *ks)
578{
579 ks_wrreg16(ks, KS_IER, ks->rc_ier);
580} /* ks_enable_int */
581
582/**
583 * ks_tx_fifo_space - return the available hardware buffer size.
584 * @ks: The chip information
585 *
586 */
587static inline u16 ks_tx_fifo_space(struct ks_net *ks)
588{
589 return ks_rdreg16(ks, KS_TXMIR) & 0x1fff;
590}
591
592/**
593 * ks_save_cmd_reg - save the command register from the cache.
594 * @ks: The chip information
595 *
596 */
597static inline void ks_save_cmd_reg(struct ks_net *ks)
598{
599	/* The ks8851 MLL has a bug reading back the command register,
600	 * so rely on software to save the content of the command register.
601 */
602 ks->cmd_reg_cache_int = ks->cmd_reg_cache;
603}
604
605/**
606 * ks_restore_cmd_reg - restore the command register from the cache and
607 * write to hardware register.
608 * @ks: The chip information
609 *
610 */
611static inline void ks_restore_cmd_reg(struct ks_net *ks)
612{
613 ks->cmd_reg_cache = ks->cmd_reg_cache_int;
614 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
615}
616
617/**
618 * ks_set_powermode - set power mode of the device
619 * @ks: The chip information
620 * @pwrmode: The power mode value to write to KS_PMECR.
621 *
622 * Change the power mode of the chip.
623 */
624static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
625{
626 unsigned pmecr;
627
628 if (netif_msg_hw(ks))
629 ks_dbg(ks, "setting power mode %d\n", pwrmode);
630
631 ks_rdreg16(ks, KS_GRR);
632 pmecr = ks_rdreg16(ks, KS_PMECR);
633 pmecr &= ~PMECR_PM_MASK;
634 pmecr |= pwrmode;
635
636 ks_wrreg16(ks, KS_PMECR, pmecr);
637}
638
639/**
640 * ks_read_config - read chip configuration of bus width.
641 * @ks: The chip information
642 *
643 */
644static void ks_read_config(struct ks_net *ks)
645{
646 u16 reg_data = 0;
647
648 /* Regardless of bus width, 8 bit read should always work.*/
649 reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
650 reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;
651
652 /* addr/data bus are multiplexed */
653 ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
654
655	/* Depending on the bus width, garbage bytes are prepended to
656	   data read from the QMU.
657 */
658
659 if (reg_data & CCR_8BIT) {
660 ks->bus_width = ENUM_BUS_8BIT;
661 ks->extra_byte = 1;
662 } else if (reg_data & CCR_16BIT) {
663 ks->bus_width = ENUM_BUS_16BIT;
664 ks->extra_byte = 2;
665 } else {
666 ks->bus_width = ENUM_BUS_32BIT;
667 ks->extra_byte = 4;
668 }
669}
670
671/**
672 * ks_soft_reset - issue one of the soft resets to the device
673 * @ks: The device state.
674 * @op: The bit(s) to set in the GRR
675 *
676 * Issue the relevant soft-reset command to the device's GRR register
677 * specified by @op.
678 *
679 * Note, the delays are in there as a caution to ensure that the reset
680 * has time to take effect and then complete. Since the datasheet does
681 * not currently specify the exact sequence, we have chosen something
682 * that seems to work with our device.
683 */
684static void ks_soft_reset(struct ks_net *ks, unsigned op)
685{
686 /* Disable interrupt first */
687 ks_wrreg16(ks, KS_IER, 0x0000);
688 ks_wrreg16(ks, KS_GRR, op);
689 mdelay(10); /* wait a short time to effect reset */
690 ks_wrreg16(ks, KS_GRR, 0);
691 mdelay(1); /* wait for condition to clear */
692}
693
694
695void ks_enable_qmu(struct ks_net *ks)
696{
697 u16 w;
698
699 w = ks_rdreg16(ks, KS_TXCR);
700 /* Enables QMU Transmit (TXCR). */
701 ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
702
703 /*
704 * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
705 * Enable
706 */
707
708 w = ks_rdreg16(ks, KS_RXQCR);
709 ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
710
711 /* Enables QMU Receive (RXCR1). */
712 w = ks_rdreg16(ks, KS_RXCR1);
713 ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
714 ks->enabled = true;
715} /* ks_enable_qmu */
716
717static void ks_disable_qmu(struct ks_net *ks)
718{
719 u16 w;
720
721 w = ks_rdreg16(ks, KS_TXCR);
722
723 /* Disables QMU Transmit (TXCR). */
724 w &= ~TXCR_TXE;
725 ks_wrreg16(ks, KS_TXCR, w);
726
727 /* Disables QMU Receive (RXCR1). */
728 w = ks_rdreg16(ks, KS_RXCR1);
729 w &= ~RXCR1_RXE ;
730 ks_wrreg16(ks, KS_RXCR1, w);
731
732 ks->enabled = false;
733
734} /* ks_disable_qmu */
735
736/**
737 * ks_read_qmu - read 1 pkt data from the QMU.
738 * @ks: The chip information
739 * @buf: buffer address to save 1 pkt
740 * @len: Pkt length
741 * Here is the sequence to read 1 pkt:
742 * 1. set pseudo-DMA mode
743 * 2. read prepend data
744 * 3. read pkt data
745 * 4. reset pseudo-DMA mode
746 */
747static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
748{
749 u32 r = ks->extra_byte & 0x1 ;
750 u32 w = ks->extra_byte - r;
751
752	/* 1. set pseudo-DMA mode */
753 ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
754 ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
755
756 /* 2. read prepend data */
757 /**
758 * read 4 + extra bytes and discard them.
759 * extra bytes for dummy, 2 for status, 2 for len
760 */
761
762 /* use likely(r) for 8 bit access for performance */
763 if (unlikely(r))
764 ioread8(ks->hw_addr);
765 ks_inblk(ks, buf, w + 2 + 2);
766
767 /* 3. read pkt data */
768 ks_inblk(ks, buf, ALIGN(len, 4));
769
770	/* 4. reset pseudo-DMA mode */
771 ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
772}
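/*
 * Sketch of the data ks_read_qmu() pulls from the QMU (a summary of the
 * code above; the exact prepend layout depends on the bus width):
 *
 *	[extra_byte dummy byte(s)][2 status bytes][2 byte-count bytes]
 *	[frame data, read as ALIGN(len, 4) bytes]
 *
 * The prepended words are read into @buf and then simply overwritten by
 * the frame data that follows.
 */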
773
774/**
775 * ks_rcv - read multiple pkts data from the QMU.
776 * @ks: The chip information
777 * @netdev: The network device being opened.
778 *
779 * Read all of header information before reading pkt content.
780 * It is not allowed to read only a portion of the pkts left in the
781 * QMU after issuing the interrupt ack.
782 */
783static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
784{
785 u32 i;
786 struct type_frame_head *frame_hdr = ks->frame_head_info;
787 struct sk_buff *skb;
788
789 ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8;
790
791 /* read all header information */
792 for (i = 0; i < ks->frame_cnt; i++) {
793 /* Checking Received packet status */
794 frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR);
795 /* Get packet len from hardware */
796 frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR);
797 frame_hdr++;
798 }
799
800 frame_hdr = ks->frame_head_info;
801 while (ks->frame_cnt--) {
802 skb = dev_alloc_skb(frame_hdr->len + 16);
803 if (likely(skb && (frame_hdr->sts & RXFSHR_RXFV) &&
804 (frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
805 skb_reserve(skb, 2);
806 /* read data block including CRC 4 bytes */
807			ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
808			skb_put(skb, frame_hdr->len);
809 skb->dev = netdev;
810 skb->protocol = eth_type_trans(skb, netdev);
811 netif_rx(skb);
812 } else {
813 printk(KERN_ERR "%s: err:skb alloc\n", __func__);
814 ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
815 if (skb)
816 dev_kfree_skb_irq(skb);
817 }
818 frame_hdr++;
819 }
820}
821
822/**
823 * ks_update_link_status - link status update.
824 * @netdev: The network device being opened.
825 * @ks: The chip information
826 *
827 */
828
829static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
830{
831 /* check the status of the link */
832 u32 link_up_status;
833 if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) {
834 netif_carrier_on(netdev);
835 link_up_status = true;
836 } else {
837 netif_carrier_off(netdev);
838 link_up_status = false;
839 }
840 if (netif_msg_link(ks))
841 ks_dbg(ks, "%s: %s\n",
842 __func__, link_up_status ? "UP" : "DOWN");
843}
844
845/**
846 * ks_irq - device interrupt handler
847 * @irq: Interrupt number passed to the handler.
848 * @pw: The private word passed to request_irq(), our struct net_device.
849 *
850 * This is the handler invoked to find out what happened
851 *
852 * Read the interrupt status, work out what needs to be done and then clear
853 * any of the interrupts that are not needed.
854 */
855
856static irqreturn_t ks_irq(int irq, void *pw)
857{
858	struct net_device *netdev = pw;
859 struct ks_net *ks = netdev_priv(netdev);
860	u16 status;
861
862 /*this should be the first in IRQ handler */
863 ks_save_cmd_reg(ks);
864
865 status = ks_rdreg16(ks, KS_ISR);
866 if (unlikely(!status)) {
867 ks_restore_cmd_reg(ks);
868 return IRQ_NONE;
869 }
870
871 ks_wrreg16(ks, KS_ISR, status);
872
873 if (likely(status & IRQ_RXI))
874 ks_rcv(ks, netdev);
875
876 if (unlikely(status & IRQ_LCI))
877 ks_update_link_status(netdev, ks);
878
879 if (unlikely(status & IRQ_TXI))
880 netif_wake_queue(netdev);
881
882 if (unlikely(status & IRQ_LDI)) {
883
884 u16 pmecr = ks_rdreg16(ks, KS_PMECR);
885 pmecr &= ~PMECR_WKEVT_MASK;
886 ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
887 }
888
889 /* this should be the last in IRQ handler*/
890 ks_restore_cmd_reg(ks);
891 return IRQ_HANDLED;
892}
893
894
895/**
896 * ks_net_open - open network device
897 * @netdev: The network device being opened.
898 *
899 * Called when the network device is marked active, such as a user executing
900 * 'ifconfig up' on the device.
901 */
902static int ks_net_open(struct net_device *netdev)
903{
904 struct ks_net *ks = netdev_priv(netdev);
905 int err;
906
907#define KS_INT_FLAGS (IRQF_DISABLED|IRQF_TRIGGER_LOW)
908 /* lock the card, even if we may not actually do anything
909 * else at the moment.
910 */
911
912 if (netif_msg_ifup(ks))
913 ks_dbg(ks, "%s - entry\n", __func__);
914
915	/* request the interrupt line for the device */
916	err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);
917
918 if (err) {
919 printk(KERN_ERR "Failed to request IRQ: %d: %d\n",
920 ks->irq, err);
921 return err;
922 }
923
924	/* wake up the chip from power-saving to normal mode */
925 ks_set_powermode(ks, PMECR_PM_NORMAL);
926 mdelay(1); /* wait for normal mode to take effect */
927
928 ks_wrreg16(ks, KS_ISR, 0xffff);
929 ks_enable_int(ks);
930 ks_enable_qmu(ks);
931 netif_start_queue(ks->netdev);
932
933	if (netif_msg_ifup(ks))
934 ks_dbg(ks, "network device %s up\n", netdev->name);
935
936 return 0;
937}
938
939/**
940 * ks_net_stop - close network device
941 * @netdev: The device being closed.
942 *
943 * Called to close down a network device which has been active. Cancel any
944 * work, shut down the RX and TX processes and then place the chip into a low
945 * power state whilst it is not being used.
946 */
947static int ks_net_stop(struct net_device *netdev)
948{
949 struct ks_net *ks = netdev_priv(netdev);
950
951 if (netif_msg_ifdown(ks))
952 ks_info(ks, "%s: shutting down\n", netdev->name);
953
954 netif_stop_queue(netdev);
955
956	mutex_lock(&ks->lock);
957
958 /* turn off the IRQs and ack any outstanding */
959 ks_wrreg16(ks, KS_IER, 0x0000);
960 ks_wrreg16(ks, KS_ISR, 0xffff);
961
962	/* shutdown RX/TX QMU */
963	ks_disable_qmu(ks);
964
965 /* set powermode to soft power down to save power */
966 ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
967 free_irq(ks->irq, netdev);
968 mutex_unlock(&ks->lock);
969 return 0;
970}
971
972
973/**
974 * ks_write_qmu - write 1 pkt data to the QMU.
975 * @ks: The chip information
976 * @pdata: buffer address of 1 pkt to write
977 * @len: Pkt length in bytes
978 * Here is the sequence to write 1 pkt:
979 * 1. set pseudo-DMA mode
980 * 2. write status/length
981 * 3. write pkt data
982 * 4. reset pseudo-DMA mode
983 * 5. enqueue the pkt (set TXQCR_METFE)
984 * 6. wait until the pkt is out (TXQCR_METFE auto-cleared)
985 */
986static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
987{
988	/* start header at txb[0] to align txw entries */
989	ks->txh.txw[0] = 0;
990	ks->txh.txw[1] = cpu_to_le16(len);
991
992	/* 1. set pseudo-DMA mode */
993	ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
994	/* 2. write status/length info */
995 ks_outblk(ks, ks->txh.txw, 4);
996 /* 3. write pkt data */
997 ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
998	/* 4. reset pseudo-DMA mode */
999 ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
1000 /* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */
1001 ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
1002 /* 6. wait until TXQCR_METFE is auto-cleared */
1003 while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE)
1004 ;
1005}
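/*
 * Usage note (see ks_start_xmit() below): the caller is expected to have
 * checked that the TX FIFO has room for the frame first, e.g.
 *
 *	if (ks_tx_fifo_space(ks) >= skb->len + 12)
 *		ks_write_qmu(ks, skb->data, skb->len);
 *
 * where the extra 12 bytes cover alignment, status/length and CRC.
 */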
1006
1007/**
1008 * ks_start_xmit - transmit packet
1009 * @skb : The buffer to transmit
1010 * @netdev : The device used to transmit the packet.
1011 *
1012 * Called by the network layer to transmit the @skb.
1013 * Interrupts are disabled because tx and rx must be mutually exclusive,
1014 * so while tx is in progress the IRQ is prevented from happening.
1015 */
1016static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1017{
1018 int retv = NETDEV_TX_OK;
1019 struct ks_net *ks = netdev_priv(netdev);
1020
1021 disable_irq(netdev->irq);
1022 ks_disable_int(ks);
1023 spin_lock(&ks->statelock);
1024
1025	/* Extra space is required:
1026 * 4 byte for alignment, 4 for status/length, 4 for CRC
1027 */
1028
1029 if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
1030 ks_write_qmu(ks, skb->data, skb->len);
1031 dev_kfree_skb(skb);
1032 } else
1033 retv = NETDEV_TX_BUSY;
1034 spin_unlock(&ks->statelock);
1035 ks_enable_int(ks);
1036 enable_irq(netdev->irq);
1037 return retv;
1038}
1039
1040/**
1041 * ks_start_rx - ready to serve pkts
1042 * @ks : The chip information
1043 *
1044 */
1045static void ks_start_rx(struct ks_net *ks)
1046{
1047 u16 cntl;
1048
1049 /* Enables QMU Receive (RXCR1). */
1050 cntl = ks_rdreg16(ks, KS_RXCR1);
1051 cntl |= RXCR1_RXE ;
1052 ks_wrreg16(ks, KS_RXCR1, cntl);
1053} /* ks_start_rx */
1054
1055/**
1056 * ks_stop_rx - stop to serve pkts
1057 * @ks : The chip information
1058 *
1059 */
1060static void ks_stop_rx(struct ks_net *ks)
1061{
1062 u16 cntl;
1063
1064 /* Disables QMU Receive (RXCR1). */
1065 cntl = ks_rdreg16(ks, KS_RXCR1);
1066 cntl &= ~RXCR1_RXE ;
1067 ks_wrreg16(ks, KS_RXCR1, cntl);
1068
1069} /* ks_stop_rx */
1070
1071static unsigned long const ethernet_polynomial = 0x04c11db7U;
1072
1073static unsigned long ether_gen_crc(int length, u8 *data)
1074{
1075 long crc = -1;
1076 while (--length >= 0) {
1077 u8 current_octet = *data++;
1078 int bit;
1079
1080 for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
1081 crc = (crc << 1) ^
1082 ((crc < 0) ^ (current_octet & 1) ?
1083 ethernet_polynomial : 0);
1084 }
1085 }
1086 return (unsigned long)crc;
1087} /* ether_gen_crc */
1088
1089/**
1090* ks_set_grpaddr - set multicast information
1091* @ks : The chip information
1092*/
1093
1094static void ks_set_grpaddr(struct ks_net *ks)
1095{
1096 u8 i;
1097 u32 index, position, value;
1098
1099 memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE);
1100
1101 for (i = 0; i < ks->mcast_lst_size; i++) {
1102 position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f;
1103 index = position >> 3;
1104 value = 1 << (position & 7);
1105 ks->mcast_bits[index] |= (u8)value;
1106 }
1107
1108 for (i = 0; i < HW_MCAST_SIZE; i++) {
1109 if (i & 1) {
1110 ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1),
1111 (ks->mcast_bits[i] << 8) |
1112 ks->mcast_bits[i - 1]);
1113 }
1114 }
1115} /* ks_set_grpaddr */
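/*
 * Worked through (a restatement of the code above): for each multicast
 * address the top six bits of the CRC pick one of 64 filter bits,
 *
 *	position = (crc >> 26) & 0x3f;
 *	mcast_bits[position >> 3] |= 1 << (position & 7);
 *
 * and the resulting byte pairs are then written to KS_MAHTR0..KS_MAHTR3.
 */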
1116
1117/*
1118* ks_clear_mcast - clear multicast information
1119*
1120* @ks : The chip information
1121* This routine removes all mcast addresses set in the hardware.
1122*/
1123
1124static void ks_clear_mcast(struct ks_net *ks)
1125{
1126 u16 i, mcast_size;
1127 for (i = 0; i < HW_MCAST_SIZE; i++)
1128 ks->mcast_bits[i] = 0;
1129
1130 mcast_size = HW_MCAST_SIZE >> 2;
1131 for (i = 0; i < mcast_size; i++)
1132 ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0);
1133}
1134
1135static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode)
1136{
1137 u16 cntl;
1138 ks->promiscuous = promiscuous_mode;
1139 ks_stop_rx(ks); /* Stop receiving for reconfiguration */
1140 cntl = ks_rdreg16(ks, KS_RXCR1);
1141
1142 cntl &= ~RXCR1_FILTER_MASK;
1143 if (promiscuous_mode)
1144 /* Enable Promiscuous mode */
1145 cntl |= RXCR1_RXAE | RXCR1_RXINVF;
1146 else
1147 /* Disable Promiscuous mode (default normal mode) */
1148 cntl |= RXCR1_RXPAFMA;
1149
1150 ks_wrreg16(ks, KS_RXCR1, cntl);
1151
1152 if (ks->enabled)
1153 ks_start_rx(ks);
1154
1155} /* ks_set_promis */
1156
1157static void ks_set_mcast(struct ks_net *ks, u16 mcast)
1158{
1159 u16 cntl;
1160
1161 ks->all_mcast = mcast;
1162 ks_stop_rx(ks); /* Stop receiving for reconfiguration */
1163 cntl = ks_rdreg16(ks, KS_RXCR1);
1164 cntl &= ~RXCR1_FILTER_MASK;
1165 if (mcast)
1166 /* Enable "Perfect with Multicast address passed mode" */
1167 cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
1168 else
1169 /**
1170 * Disable "Perfect with Multicast address passed
1171 * mode" (normal mode).
1172 */
1173 cntl |= RXCR1_RXPAFMA;
1174
1175 ks_wrreg16(ks, KS_RXCR1, cntl);
1176
1177 if (ks->enabled)
1178 ks_start_rx(ks);
1179} /* ks_set_mcast */
1180
1181static void ks_set_rx_mode(struct net_device *netdev)
1182{
1183 struct ks_net *ks = netdev_priv(netdev);
1184 struct dev_mc_list *ptr;
1185
1186 /* Turn on/off promiscuous mode. */
1187 if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
1188 ks_set_promis(ks,
1189 (u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC));
1190 /* Turn on/off all mcast mode. */
1191 else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)
1192 ks_set_mcast(ks,
1193 (u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI));
1194 else
1195 ks_set_promis(ks, false);
1196
1197	if ((netdev->flags & IFF_MULTICAST) && netdev_mc_count(netdev)) {
1198		if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
1199			int i = 0;
1200
1201			netdev_for_each_mc_addr(ptr, netdev) {
1202				if (!(*ptr->dmi_addr & 1))
1203 continue;
1204 if (i >= MAX_MCAST_LST)
1205 break;
1206 memcpy(ks->mcast_lst[i++], ptr->dmi_addr,
1207 MAC_ADDR_LEN);
1208 }
1209 ks->mcast_lst_size = (u8)i;
1210 ks_set_grpaddr(ks);
1211 } else {
1212 /**
1213 * List too big to support so
1214 * turn on all mcast mode.
1215 */
1216 ks->mcast_lst_size = MAX_MCAST_LST;
1217 ks_set_mcast(ks, true);
1218 }
1219 } else {
1220 ks->mcast_lst_size = 0;
1221 ks_clear_mcast(ks);
1222 }
1223} /* ks_set_rx_mode */
1224
1225static void ks_set_mac(struct ks_net *ks, u8 *data)
1226{
1227 u16 *pw = (u16 *)data;
1228 u16 w, u;
1229
1230 ks_stop_rx(ks); /* Stop receiving for reconfiguration */
1231
1232 u = *pw++;
1233 w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1234 ks_wrreg16(ks, KS_MARH, w);
1235
1236 u = *pw++;
1237 w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1238 ks_wrreg16(ks, KS_MARM, w);
1239
1240 u = *pw;
1241 w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1242 ks_wrreg16(ks, KS_MARL, w);
1243
1244 memcpy(ks->mac_addr, data, 6);
1245
1246 if (ks->enabled)
1247 ks_start_rx(ks);
1248}
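/*
 * Byte-order note (illustrative, assuming a little-endian CPU): for a MAC
 * address aa:bb:cc:dd:ee:ff the swapping above writes 0xaabb to KS_MARH,
 * 0xccdd to KS_MARM and 0xeeff to KS_MARL, so the registers hold the
 * address as big-endian 16-bit pairs.
 */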
1249
1250static int ks_set_mac_address(struct net_device *netdev, void *paddr)
1251{
1252 struct ks_net *ks = netdev_priv(netdev);
1253 struct sockaddr *addr = paddr;
1254 u8 *da;
1255
1256 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1257
1258 da = (u8 *)netdev->dev_addr;
1259
1260 ks_set_mac(ks, da);
1261 return 0;
1262}
1263
1264static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
1265{
1266 struct ks_net *ks = netdev_priv(netdev);
1267
1268 if (!netif_running(netdev))
1269 return -EINVAL;
1270
1271 return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
1272}
1273
1274static const struct net_device_ops ks_netdev_ops = {
1275 .ndo_open = ks_net_open,
1276 .ndo_stop = ks_net_stop,
1277 .ndo_do_ioctl = ks_net_ioctl,
1278 .ndo_start_xmit = ks_start_xmit,
1279 .ndo_set_mac_address = ks_set_mac_address,
1280 .ndo_set_rx_mode = ks_set_rx_mode,
1281 .ndo_change_mtu = eth_change_mtu,
1282 .ndo_validate_addr = eth_validate_addr,
1283};
1284
1285/* ethtool support */
1286
1287static void ks_get_drvinfo(struct net_device *netdev,
1288 struct ethtool_drvinfo *di)
1289{
1290 strlcpy(di->driver, DRV_NAME, sizeof(di->driver));
1291 strlcpy(di->version, "1.00", sizeof(di->version));
1292 strlcpy(di->bus_info, dev_name(netdev->dev.parent),
1293 sizeof(di->bus_info));
1294}
1295
1296static u32 ks_get_msglevel(struct net_device *netdev)
1297{
1298 struct ks_net *ks = netdev_priv(netdev);
1299 return ks->msg_enable;
1300}
1301
1302static void ks_set_msglevel(struct net_device *netdev, u32 to)
1303{
1304 struct ks_net *ks = netdev_priv(netdev);
1305 ks->msg_enable = to;
1306}
1307
1308static int ks_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1309{
1310 struct ks_net *ks = netdev_priv(netdev);
1311 return mii_ethtool_gset(&ks->mii, cmd);
1312}
1313
1314static int ks_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1315{
1316 struct ks_net *ks = netdev_priv(netdev);
1317 return mii_ethtool_sset(&ks->mii, cmd);
1318}
1319
1320static u32 ks_get_link(struct net_device *netdev)
1321{
1322 struct ks_net *ks = netdev_priv(netdev);
1323 return mii_link_ok(&ks->mii);
1324}
1325
1326static int ks_nway_reset(struct net_device *netdev)
1327{
1328 struct ks_net *ks = netdev_priv(netdev);
1329 return mii_nway_restart(&ks->mii);
1330}
1331
1332static const struct ethtool_ops ks_ethtool_ops = {
1333 .get_drvinfo = ks_get_drvinfo,
1334 .get_msglevel = ks_get_msglevel,
1335 .set_msglevel = ks_set_msglevel,
1336 .get_settings = ks_get_settings,
1337 .set_settings = ks_set_settings,
1338 .get_link = ks_get_link,
1339 .nway_reset = ks_nway_reset,
1340};
1341
1342/* MII interface controls */
1343
1344/**
1345 * ks_phy_reg - convert MII register into a KS8851 register
1346 * @reg: MII register number.
1347 *
1348 * Return the KS8851 register number for the corresponding MII PHY register
1349 * if possible. Return zero if the MII register has no direct mapping to the
1350 * KS8851 register set.
1351 */
1352static int ks_phy_reg(int reg)
1353{
1354 switch (reg) {
1355 case MII_BMCR:
1356 return KS_P1MBCR;
1357 case MII_BMSR:
1358 return KS_P1MBSR;
1359 case MII_PHYSID1:
1360 return KS_PHY1ILR;
1361 case MII_PHYSID2:
1362 return KS_PHY1IHR;
1363 case MII_ADVERTISE:
1364 return KS_P1ANAR;
1365 case MII_LPA:
1366 return KS_P1ANLPR;
1367 }
1368
1369 return 0x0;
1370}
1371
1372/**
1373 * ks_phy_read - MII interface PHY register read.
1374 * @netdev: The network device the PHY is on.
1375 * @phy_addr: Address of PHY (ignored as we only have one)
1376 * @reg: The register to read.
1377 *
1378 * This call reads data from the PHY register specified in @reg. Since the
1379 * device does not support all the MII registers, the non-existent values
1380 * are always returned as zero.
1381 *
1382 * We return zero for unsupported registers as the MII code does not check
1383 * the value returned for any error status, and simply returns it to the
1384 * caller. The mii-tool that the driver was tested with takes any negative error
1385 * as real PHY capabilities, thus displaying incorrect data to the user.
1386 */
1387static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg)
1388{
1389 struct ks_net *ks = netdev_priv(netdev);
1390 int ksreg;
1391 int result;
1392
1393 ksreg = ks_phy_reg(reg);
1394 if (!ksreg)
1395 return 0x0; /* no error return allowed, so use zero */
1396
1397 mutex_lock(&ks->lock);
1398 result = ks_rdreg16(ks, ksreg);
1399 mutex_unlock(&ks->lock);
1400
1401 return result;
1402}
1403
1404static void ks_phy_write(struct net_device *netdev,
1405 int phy, int reg, int value)
1406{
1407 struct ks_net *ks = netdev_priv(netdev);
1408 int ksreg;
1409
1410 ksreg = ks_phy_reg(reg);
1411 if (ksreg) {
1412 mutex_lock(&ks->lock);
1413 ks_wrreg16(ks, ksreg, value);
1414 mutex_unlock(&ks->lock);
1415 }
1416}
1417
1418/**
1419 * ks_read_selftest - read the selftest memory info.
1420 * @ks: The device state
1421 *
1422 * Read and check the TX/RX memory selftest information.
1423 */
1424static int ks_read_selftest(struct ks_net *ks)
1425{
1426 unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
1427 int ret = 0;
1428 unsigned rd;
1429
1430 rd = ks_rdreg16(ks, KS_MBIR);
1431
1432 if ((rd & both_done) != both_done) {
1433 ks_warn(ks, "Memory selftest not finished\n");
1434 return 0;
1435 }
1436
1437 if (rd & MBIR_TXMBFA) {
1438 ks_err(ks, "TX memory selftest fails\n");
1439 ret |= 1;
1440 }
1441
1442 if (rd & MBIR_RXMBFA) {
1443 ks_err(ks, "RX memory selftest fails\n");
1444 ret |= 2;
1445 }
1446
1447 ks_info(ks, "the selftest passes\n");
1448 return ret;
1449}
1450
1451static void ks_setup(struct ks_net *ks)
1452{
1453 u16 w;
1454
1455 /**
1456 * Configure QMU Transmit
1457 */
1458
1459 /* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */
1460 ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);
1461
1462 /* Setup Receive Frame Data Pointer Auto-Increment */
1463 ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
1464
1465 /* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
1466 ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK);
1467
1468 /* Setup RxQ Command Control (RXQCR) */
1469 ks->rc_rxqcr = RXQCR_CMD_CNTL;
1470 ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
1471
1472 /**
1473	 * Force half duplex (the default is full duplex), because if
1474	 * auto-negotiation fails, most switches fall back to
1475	 * half-duplex.
1476 */
1477
1478 w = ks_rdreg16(ks, KS_P1MBCR);
1479 w &= ~P1MBCR_FORCE_FDX;
1480 ks_wrreg16(ks, KS_P1MBCR, w);
1481
1482 w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
1483 ks_wrreg16(ks, KS_TXCR, w);
1484
1485	w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE | RXCR1_RXME | RXCR1_RXIPFCC;
1486
1487 if (ks->promiscuous) /* bPromiscuous */
1488 w |= (RXCR1_RXAE | RXCR1_RXINVF);
1489 else if (ks->all_mcast) /* Multicast address passed mode */
1490 w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
1491 else /* Normal mode */
1492 w |= RXCR1_RXPAFMA;
1493
1494 ks_wrreg16(ks, KS_RXCR1, w);
1495} /*ks_setup */
1496
1497
1498static void ks_setup_int(struct ks_net *ks)
1499{
1500 ks->rc_ier = 0x00;
1501 /* Clear the interrupts status of the hardware. */
1502 ks_wrreg16(ks, KS_ISR, 0xffff);
1503
1504 /* Enables the interrupts of the hardware. */
1505 ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
1506} /* ks_setup_int */
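/*
 * Note (summary of the code above): only the link-change (IRQ_LCI),
 * transmit (IRQ_TXI) and receive (IRQ_RXI) interrupts are cached in
 * rc_ier here; the value is written to KS_IER by ks_enable_int() when
 * the device is opened.
 */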
1507
1508static int ks_hw_init(struct ks_net *ks)
1509{
1510#define MHEADER_SIZE (sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
1511 ks->promiscuous = 0;
1512 ks->all_mcast = 0;
1513 ks->mcast_lst_size = 0;
1514
1515 ks->frame_head_info = (struct type_frame_head *) \
1516 kmalloc(MHEADER_SIZE, GFP_KERNEL);
1517 if (!ks->frame_head_info) {
1518		printk(KERN_ERR "Error: Failed to allocate frame memory\n");
1519 return false;
1520 }
1521
1522 ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
1523 return true;
1524}
1525
1526
1527static int __devinit ks8851_probe(struct platform_device *pdev)
1528{
1529 int err = -ENOMEM;
1530 struct resource *io_d, *io_c;
1531 struct net_device *netdev;
1532 struct ks_net *ks;
1533 u16 id, data;
1534
1535 io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1536 io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1537
1538 if (!request_mem_region(io_d->start, resource_size(io_d), DRV_NAME))
1539 goto err_mem_region;
1540
1541 if (!request_mem_region(io_c->start, resource_size(io_c), DRV_NAME))
1542 goto err_mem_region1;
1543
1544 netdev = alloc_etherdev(sizeof(struct ks_net));
1545 if (!netdev)
1546 goto err_alloc_etherdev;
1547
1548 SET_NETDEV_DEV(netdev, &pdev->dev);
1549
1550 ks = netdev_priv(netdev);
1551 ks->netdev = netdev;
1552 ks->hw_addr = ioremap(io_d->start, resource_size(io_d));
1553
1554 if (!ks->hw_addr)
1555 goto err_ioremap;
1556
1557 ks->hw_addr_cmd = ioremap(io_c->start, resource_size(io_c));
1558 if (!ks->hw_addr_cmd)
1559 goto err_ioremap1;
1560
1561 ks->irq = platform_get_irq(pdev, 0);
1562
1563 if (ks->irq < 0) {
1564 err = ks->irq;
1565 goto err_get_irq;
1566 }
1567
1568 ks->pdev = pdev;
1569
1570 mutex_init(&ks->lock);
1571 spin_lock_init(&ks->statelock);
1572
1573 netdev->netdev_ops = &ks_netdev_ops;
1574 netdev->ethtool_ops = &ks_ethtool_ops;
1575
1576 /* setup mii state */
1577 ks->mii.dev = netdev;
1578 ks->mii.phy_id = 1,
1579 ks->mii.phy_id_mask = 1;
1580 ks->mii.reg_num_mask = 0xf;
1581 ks->mii.mdio_read = ks_phy_read;
1582 ks->mii.mdio_write = ks_phy_write;
1583
1584 ks_info(ks, "message enable is %d\n", msg_enable);
1585 /* set the default message enable */
1586 ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
1587 NETIF_MSG_PROBE |
1588 NETIF_MSG_LINK));
1589 ks_read_config(ks);
1590
1591 /* simple check for a valid chip being connected to the bus */
1592 if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
1593 ks_err(ks, "failed to read device ID\n");
1594 err = -ENODEV;
1595 goto err_register;
1596 }
1597
1598 if (ks_read_selftest(ks)) {
1599		ks_err(ks, "failed the memory selftest\n");
1600 err = -ENODEV;
1601 goto err_register;
1602 }
1603
1604 err = register_netdev(netdev);
1605 if (err)
1606 goto err_register;
1607
1608 platform_set_drvdata(pdev, netdev);
1609
1610 ks_soft_reset(ks, GRR_GSR);
1611 ks_hw_init(ks);
1612	ks_disable_qmu(ks);
1613	ks_setup(ks);
1614	ks_setup_int(ks);
1615	memcpy(netdev->dev_addr, ks->mac_addr, 6);
1616
1617 data = ks_rdreg16(ks, KS_OBCR);
1618 ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
1619
1620 /**
1621 * If you want to use the default MAC addr,
1622 * comment out the 2 functions below.
1623 */
1624
1625 random_ether_addr(netdev->dev_addr);
1626 ks_set_mac(ks, netdev->dev_addr);
1627
1628 id = ks_rdreg16(ks, KS_CIDER);
1629
1630 printk(KERN_INFO DRV_NAME
1631 " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
1632 (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
1633 return 0;
1634
1635err_register:
1636err_get_irq:
1637 iounmap(ks->hw_addr_cmd);
1638err_ioremap1:
1639 iounmap(ks->hw_addr);
1640err_ioremap:
1641 free_netdev(netdev);
1642err_alloc_etherdev:
1643 release_mem_region(io_c->start, resource_size(io_c));
1644err_mem_region1:
1645 release_mem_region(io_d->start, resource_size(io_d));
1646err_mem_region:
1647 return err;
1648}
1649
1650static int __devexit ks8851_remove(struct platform_device *pdev)
1651{
1652 struct net_device *netdev = platform_get_drvdata(pdev);
1653 struct ks_net *ks = netdev_priv(netdev);
1654 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1655
1656	kfree(ks->frame_head_info);
1657	unregister_netdev(netdev);
1658 iounmap(ks->hw_addr);
1659 free_netdev(netdev);
1660 release_mem_region(iomem->start, resource_size(iomem));
1661 platform_set_drvdata(pdev, NULL);
1662 return 0;
1663
1664}
1665
1666static struct platform_driver ks8851_platform_driver = {
1667 .driver = {
1668 .name = DRV_NAME,
1669 .owner = THIS_MODULE,
1670 },
1671 .probe = ks8851_probe,
1672 .remove = __devexit_p(ks8851_remove),
1673};
1674
1675static int __init ks8851_init(void)
1676{
1677 return platform_driver_register(&ks8851_platform_driver);
1678}
1679
1680static void __exit ks8851_exit(void)
1681{
1682 platform_driver_unregister(&ks8851_platform_driver);
1683}
1684
1685module_init(ks8851_init);
1686module_exit(ks8851_exit);
1687
1688MODULE_DESCRIPTION("KS8851 MLL Network driver");
1689MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
1690MODULE_LICENSE("GPL");
1691module_param_named(message, msg_enable, int, 0);
1692MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
1693