blob: ccb1f8172cab67435378464928c107e8db7337e9 [file] [log] [blame]
David Daneyd6aa60a2009-10-14 12:04:41 -07001/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
David Daneyeeae05a2012-08-21 11:45:06 -07006 * Copyright (C) 2009-2012 Cavium, Inc
David Daneyd6aa60a2009-10-14 12:04:41 -07007 */
8
David Daneyd6aa60a2009-10-14 12:04:41 -07009#include <linux/platform_device.h>
David Daney368bec02012-07-05 18:12:39 +020010#include <linux/dma-mapping.h>
David Daneyd6aa60a2009-10-14 12:04:41 -070011#include <linux/etherdevice.h>
David Daney368bec02012-07-05 18:12:39 +020012#include <linux/capability.h>
Chad Reese3d305852012-08-21 11:45:07 -070013#include <linux/net_tstamp.h>
David Daney368bec02012-07-05 18:12:39 +020014#include <linux/interrupt.h>
15#include <linux/netdevice.h>
16#include <linux/spinlock.h>
David Daneyd6aa60a2009-10-14 12:04:41 -070017#include <linux/if_vlan.h>
David Daney368bec02012-07-05 18:12:39 +020018#include <linux/of_mdio.h>
19#include <linux/module.h>
20#include <linux/of_net.h>
21#include <linux/init.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090022#include <linux/slab.h>
David Daneyd6aa60a2009-10-14 12:04:41 -070023#include <linux/phy.h>
David Daney368bec02012-07-05 18:12:39 +020024#include <linux/io.h>
David Daneyd6aa60a2009-10-14 12:04:41 -070025
26#include <asm/octeon/octeon.h>
27#include <asm/octeon/cvmx-mixx-defs.h>
28#include <asm/octeon/cvmx-agl-defs.h>
29
30#define DRV_NAME "octeon_mgmt"
31#define DRV_VERSION "2.0"
32#define DRV_DESCRIPTION \
33 "Cavium Networks Octeon MII (management) port Network Driver"
34
35#define OCTEON_MGMT_NAPI_WEIGHT 16
36
/*
 * Ring sizes that are powers of two allow for more efficient modulo
 * operations.
 */
41#define OCTEON_MGMT_RX_RING_SIZE 512
42#define OCTEON_MGMT_TX_RING_SIZE 128
43
44/* Allow 8 bytes for vlan and FCS. */
45#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
46
/* Hardware DMA ring entry layout, shared by the RX and TX rings.
 * The device consumes/produces the raw 64-bit value (d64); the
 * bit-field view (s) decodes it.  addr is a 40-bit physical address.
 */
union mgmt_port_ring_entry {
	u64 d64;
	struct {
		u64 reserved_62_63:2;
		/* Length of the buffer/packet in bytes */
		u64 len:14;
		/* For TX, signals that the packet should be timestamped */
		u64 tstamp:1;
		/* The RX error code */
		u64 code:7;
#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
		/* Physical address of the buffer */
		u64 addr:40;
	} s;
};
63
David Daney368bec02012-07-05 18:12:39 +020064#define MIX_ORING1 0x0
65#define MIX_ORING2 0x8
66#define MIX_IRING1 0x10
67#define MIX_IRING2 0x18
68#define MIX_CTL 0x20
69#define MIX_IRHWM 0x28
70#define MIX_IRCNT 0x30
71#define MIX_ORHWM 0x38
72#define MIX_ORCNT 0x40
73#define MIX_ISR 0x48
74#define MIX_INTENA 0x50
75#define MIX_REMCNT 0x58
76#define MIX_BIST 0x78
77
78#define AGL_GMX_PRT_CFG 0x10
79#define AGL_GMX_RX_FRM_CTL 0x18
80#define AGL_GMX_RX_FRM_MAX 0x30
81#define AGL_GMX_RX_JABBER 0x38
82#define AGL_GMX_RX_STATS_CTL 0x50
83
84#define AGL_GMX_RX_STATS_PKTS_DRP 0xb0
85#define AGL_GMX_RX_STATS_OCTS_DRP 0xb8
86#define AGL_GMX_RX_STATS_PKTS_BAD 0xc0
87
88#define AGL_GMX_RX_ADR_CTL 0x100
89#define AGL_GMX_RX_ADR_CAM_EN 0x108
90#define AGL_GMX_RX_ADR_CAM0 0x180
91#define AGL_GMX_RX_ADR_CAM1 0x188
92#define AGL_GMX_RX_ADR_CAM2 0x190
93#define AGL_GMX_RX_ADR_CAM3 0x198
94#define AGL_GMX_RX_ADR_CAM4 0x1a0
95#define AGL_GMX_RX_ADR_CAM5 0x1a8
96
David Daneyeeae05a2012-08-21 11:45:06 -070097#define AGL_GMX_TX_CLK 0x208
David Daney368bec02012-07-05 18:12:39 +020098#define AGL_GMX_TX_STATS_CTL 0x268
99#define AGL_GMX_TX_CTL 0x270
100#define AGL_GMX_TX_STAT0 0x280
101#define AGL_GMX_TX_STAT1 0x288
102#define AGL_GMX_TX_STAT2 0x290
103#define AGL_GMX_TX_STAT3 0x298
104#define AGL_GMX_TX_STAT4 0x2a0
105#define AGL_GMX_TX_STAT5 0x2a8
106#define AGL_GMX_TX_STAT6 0x2b0
107#define AGL_GMX_TX_STAT7 0x2b8
108#define AGL_GMX_TX_STAT8 0x2c0
109#define AGL_GMX_TX_STAT9 0x2c8
110
/* Per-port driver state. */
struct octeon_mgmt {
	struct net_device *netdev;
	u64 mix;		/* MIX register block base (CSR address) */
	u64 agl;		/* AGL GMX register block base (CSR address) */
	u64 agl_prt_ctl;	/* AGL PRTX_CTL register (CSR address) */
	int port;
	int irq;
	bool has_rx_tstamp;	/* RX hardware timestamp insertion enabled */
	u64 *tx_ring;
	dma_addr_t tx_ring_handle;
	unsigned int tx_next;
	unsigned int tx_next_clean;
	unsigned int tx_current_fill;
	/* The tx_list lock also protects the ring related variables */
	struct sk_buff_head tx_list;

	/* RX variables only touched in napi_poll.  No locking necessary. */
	u64 *rx_ring;
	dma_addr_t rx_ring_handle;
	unsigned int rx_next;
	unsigned int rx_next_fill;
	unsigned int rx_current_fill;
	struct sk_buff_head rx_list;

	spinlock_t lock;	/* protects IRQ enables, stats and link state */
	unsigned int last_duplex;
	unsigned int last_link;
	unsigned int last_speed;
	struct device *dev;
	struct napi_struct napi;
	struct tasklet_struct tx_clean_tasklet;
	struct phy_device *phydev;
	struct device_node *phy_np;
	resource_size_t mix_phys;
	resource_size_t mix_size;
	resource_size_t agl_phys;
	resource_size_t agl_size;
	resource_size_t agl_prt_ctl_phys;
	resource_size_t agl_prt_ctl_size;
};
151
152static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
153{
David Daneyd6aa60a2009-10-14 12:04:41 -0700154 union cvmx_mixx_intena mix_intena;
155 unsigned long flags;
156
157 spin_lock_irqsave(&p->lock, flags);
David Daney368bec02012-07-05 18:12:39 +0200158 mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
David Daneyd6aa60a2009-10-14 12:04:41 -0700159 mix_intena.s.ithena = enable ? 1 : 0;
David Daney368bec02012-07-05 18:12:39 +0200160 cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -0700161 spin_unlock_irqrestore(&p->lock, flags);
162}
163
164static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
165{
David Daneyd6aa60a2009-10-14 12:04:41 -0700166 union cvmx_mixx_intena mix_intena;
167 unsigned long flags;
168
169 spin_lock_irqsave(&p->lock, flags);
David Daney368bec02012-07-05 18:12:39 +0200170 mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
David Daneyd6aa60a2009-10-14 12:04:41 -0700171 mix_intena.s.othena = enable ? 1 : 0;
David Daney368bec02012-07-05 18:12:39 +0200172 cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -0700173 spin_unlock_irqrestore(&p->lock, flags);
174}
175
/* Unmask the RX interrupt. */
static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, /* enable = */ 1);
}
180
/* Mask the RX interrupt. */
static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, /* enable = */ 0);
}
185
/* Unmask the TX interrupt. */
static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, /* enable = */ 1);
}
190
/* Mask the TX interrupt. */
static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, /* enable = */ 0);
}
195
/* Maximum number of ring entries we allow to be in use at once;
 * keep a few slots of slack so the hardware never sees a ring that
 * is completely full. */
static unsigned int ring_max_fill(unsigned int ring_size)
{
	const unsigned int slack = 8;

	return ring_size - slack;
}
200
201static unsigned int ring_size_to_bytes(unsigned int ring_size)
202{
203 return ring_size * sizeof(union mgmt_port_ring_entry);
204}
205
/* Refill the RX descriptor ring with freshly allocated skbs until it
 * reaches its maximum fill level.  RX state is only touched from
 * napi_poll context, so no locking is needed (see struct octeon_mgmt).
 */
static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
		unsigned int size;
		union mgmt_port_ring_entry re;
		struct sk_buff *skb;

		/* CN56XX pass 1 needs 8 bytes of padding.  */
		size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;

		skb = netdev_alloc_skb(netdev, size);
		if (!skb)
			break;
		skb_reserve(skb, NET_IP_ALIGN);
		/* rx_list order mirrors ring entry order; the dequeue
		 * path relies on this. */
		__skb_queue_tail(&p->rx_list, skb);

		re.d64 = 0;
		re.s.len = size;
		re.s.addr = dma_map_single(p->dev, skb->data,
					   size,
					   DMA_FROM_DEVICE);

		/* Put it in the ring.  */
		p->rx_ring[p->rx_next_fill] = re.d64;
		/* Make the new entry visible to the device before
		 * advancing the doorbell. */
		dma_sync_single_for_device(p->dev, p->rx_ring_handle,
					   ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
					   DMA_BIDIRECTIONAL);
		p->rx_next_fill =
			(p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
		p->rx_current_fill++;
		/* Ring the bell.  */
		cvmx_write_csr(p->mix + MIX_IRING2, 1);
	}
}
242
/* Convert a raw PTP hardware clock value (nanoseconds) to a ktime_t
 * on the system clock timeline.  The system time and the PTP counter
 * are sampled back-to-back with interrupts off so the offset between
 * the two clocks is measured with minimal jitter.
 */
static ktime_t ptp_to_ktime(u64 ptptime)
{
	ktime_t ktimebase;
	u64 ptpbase;
	unsigned long flags;

	local_irq_save(flags);
	/* Fill the icache with the code */
	ktime_get_real();
	/* Flush all pending operations */
	mb();
	/* Read the time and PTP clock as close together as
	 * possible. It is important that this sequence take the same
	 * amount of time to reduce jitter
	 */
	ktimebase = ktime_get_real();
	ptpbase = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_HI);
	local_irq_restore(flags);

	/* System time at ptptime = now - (PTP now - ptptime). */
	return ktime_sub_ns(ktimebase, ptpbase - ptptime);
}
264
/* Reclaim completed TX descriptors: unmap each finished buffer,
 * deliver a TX timestamp if the hardware recorded one, free the skb
 * and acknowledge each completion to the MIX hardware one entry at a
 * time.  Runs from the tx_clean tasklet (and potentially other
 * contexts), hence the tx_list.lock dance around the shared ring
 * indices.
 */
static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
	union cvmx_mixx_orcnt mix_orcnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	int cleaned = 0;
	unsigned long flags;

	mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	while (mix_orcnt.s.orcnt) {
		spin_lock_irqsave(&p->tx_list.lock, flags);

		/* Re-read under the lock in case another cleaner
		 * consumed the completions first. */
		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);

		if (mix_orcnt.s.orcnt == 0) {
			spin_unlock_irqrestore(&p->tx_list.lock, flags);
			break;
		}

		dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
					ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
					DMA_BIDIRECTIONAL);

		re.d64 = p->tx_ring[p->tx_next_clean];
		p->tx_next_clean =
			(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
		skb = __skb_dequeue(&p->tx_list);

		mix_orcnt.u64 = 0;
		mix_orcnt.s.orcnt = 1;

		/* Acknowledge to hardware that we have the buffer.  */
		cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
		p->tx_current_fill--;

		spin_unlock_irqrestore(&p->tx_list.lock, flags);

		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);

		/* Read the hardware TX timestamp if one was recorded */
		if (unlikely(re.s.tstamp)) {
			struct skb_shared_hwtstamps ts;
			/* Read the timestamp */
			u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
			/* Remove the timestamp from the FIFO */
			cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
			/* Tell the kernel about the timestamp */
			ts.syststamp = ptp_to_ktime(ns);
			ts.hwtstamp = ns_to_ktime(ns);
			skb_tstamp_tx(skb, &ts);
		}

		dev_kfree_skb_any(skb);
		cleaned++;

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	}

	/* Ring space was freed; let the stack transmit again. */
	if (cleaned && netif_queue_stopped(p->netdev))
		netif_wake_queue(p->netdev);
}
327
/* Bottom half for TX completions: reclaim finished buffers, then
 * re-arm the TX interrupt that the hard IRQ handler masked. */
static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
{
	struct octeon_mgmt *priv = (struct octeon_mgmt *)arg;

	octeon_mgmt_clean_tx_buffers(priv);
	octeon_mgmt_enable_tx_irq(priv);
}
334
335static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
336{
337 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -0700338 unsigned long flags;
339 u64 drop, bad;
340
341 /* These reads also clear the count registers. */
David Daney368bec02012-07-05 18:12:39 +0200342 drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
343 bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);
David Daneyd6aa60a2009-10-14 12:04:41 -0700344
345 if (drop || bad) {
346 /* Do an atomic update. */
347 spin_lock_irqsave(&p->lock, flags);
348 netdev->stats.rx_errors += bad;
349 netdev->stats.rx_dropped += drop;
350 spin_unlock_irqrestore(&p->lock, flags);
351 }
352}
353
354static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
355{
356 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -0700357 unsigned long flags;
358
359 union cvmx_agl_gmx_txx_stat0 s0;
360 union cvmx_agl_gmx_txx_stat1 s1;
361
362 /* These reads also clear the count registers. */
David Daney368bec02012-07-05 18:12:39 +0200363 s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0);
364 s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1);
David Daneyd6aa60a2009-10-14 12:04:41 -0700365
366 if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
367 /* Do an atomic update. */
368 spin_lock_irqsave(&p->lock, flags);
369 netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
370 netdev->stats.collisions += s1.s.scol + s1.s.mcol;
371 spin_unlock_irqrestore(&p->lock, flags);
372 }
373}
374
375/*
376 * Dequeue a receive skb and its corresponding ring entry. The ring
377 * entry is returned, *pskb is updated to point to the skb.
378 */
/*
 * Dequeue a receive skb and its corresponding ring entry.  The ring
 * entry is returned, *pskb is updated to point to the skb.
 */
static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
					 struct sk_buff **pskb)
{
	union mgmt_port_ring_entry re;

	/* Pull the device's writes to the ring into CPU view. */
	dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
				ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
				DMA_BIDIRECTIONAL);

	re.d64 = p->rx_ring[p->rx_next];
	p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
	p->rx_current_fill--;
	/* rx_list entries are in the same order as the ring entries. */
	*pskb = __skb_dequeue(&p->rx_list);

	/* NOTE(review): the buffer was mapped in octeon_mgmt_rx_fill_ring
	 * with a size derived from the MTU, but is unmapped here with a
	 * fixed ETH_FRAME_LEN-based size — confirm these agree for
	 * non-default MTUs. */
	dma_unmap_single(p->dev, re.s.addr,
			 ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
			 DMA_FROM_DEVICE);

	return re.d64;
}
399
400
/* Process one received packet: pull it (and, for frames split across
 * undersized buffers, its continuation entries) off the RX ring,
 * attach the RX timestamp when enabled, and hand it to the network
 * stack.  Returns 0 when a packet was delivered to the stack,
 * nonzero when the entry was consumed without delivering one.
 */
static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
{
	struct net_device *netdev = p->netdev;
	union cvmx_mixx_ircnt mix_ircnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	struct sk_buff *skb2;
	struct sk_buff *skb_new;
	union mgmt_port_ring_entry re2;
	int rc = 1;


	re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
	if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
		/* A good packet, send it up. */
		skb_put(skb, re.s.len);
good:
		/* Process the RX timestamp if it was recorded */
		if (p->has_rx_tstamp) {
			/* The first 8 bytes are the timestamp */
			u64 ns = *(u64 *)skb->data;
			struct skb_shared_hwtstamps *ts;
			ts = skb_hwtstamps(skb);
			ts->hwtstamp = ns_to_ktime(ns);
			ts->syststamp = ptp_to_ktime(ns);
			__skb_pull(skb, 8);
		}
		skb->protocol = eth_type_trans(skb, netdev);
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);
		rc = 0;
	} else if (re.s.code == RING_ENTRY_CODE_MORE) {
		/*
		 * Packet split across skbs.  This can happen if we
		 * increase the MTU.  Buffers that are already in the
		 * rx ring can then end up being too small.  As the rx
		 * ring is refilled, buffers sized for the new MTU
		 * will be used and we should go back to the normal
		 * non-split case.
		 */
		skb_put(skb, re.s.len);
		do {
			re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
			if (re2.s.code != RING_ENTRY_CODE_MORE
				&& re2.s.code != RING_ENTRY_CODE_DONE)
				goto split_error;
			skb_put(skb2, re2.s.len);
			/* Grow skb by skb2->len, then append skb2's data. */
			skb_new = skb_copy_expand(skb, 0, skb2->len,
						  GFP_ATOMIC);
			if (!skb_new)
				goto split_error;
			if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
					  skb2->len))
				goto split_error;
			skb_put(skb_new, skb2->len);
			dev_kfree_skb_any(skb);
			dev_kfree_skb_any(skb2);
			skb = skb_new;
		} while (re2.s.code == RING_ENTRY_CODE_MORE);
		goto good;
	} else {
		/* Some other error, discard it. */
		dev_kfree_skb_any(skb);
		/*
		 * Error statistics are accumulated in
		 * octeon_mgmt_update_rx_stats.
		 */
	}
	goto done;
split_error:
	/* Discard the whole mess. */
	dev_kfree_skb_any(skb);
	dev_kfree_skb_any(skb2);
	/* Drain any remaining continuation entries for this frame. */
	while (re2.s.code == RING_ENTRY_CODE_MORE) {
		re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
		dev_kfree_skb_any(skb2);
	}
	netdev->stats.rx_errors++;

done:
	/* Tell the hardware we processed a packet.  */
	mix_ircnt.u64 = 0;
	mix_ircnt.s.ircnt = 1;
	cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);
	return rc;
}
488
489static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
490{
David Daneyd6aa60a2009-10-14 12:04:41 -0700491 unsigned int work_done = 0;
492 union cvmx_mixx_ircnt mix_ircnt;
493 int rc;
494
David Daney368bec02012-07-05 18:12:39 +0200495 mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
David Daneyd6aa60a2009-10-14 12:04:41 -0700496 while (work_done < budget && mix_ircnt.s.ircnt) {
497
498 rc = octeon_mgmt_receive_one(p);
499 if (!rc)
500 work_done++;
501
502 /* Check for more packets. */
David Daney368bec02012-07-05 18:12:39 +0200503 mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
David Daneyd6aa60a2009-10-14 12:04:41 -0700504 }
505
506 octeon_mgmt_rx_fill_ring(p->netdev);
507
508 return work_done;
509}
510
511static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
512{
513 struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
514 struct net_device *netdev = p->netdev;
515 unsigned int work_done = 0;
516
517 work_done = octeon_mgmt_receive_packets(p, budget);
518
519 if (work_done < budget) {
520 /* We stopped because no more packets were available. */
521 napi_complete(napi);
522 octeon_mgmt_enable_rx_irq(p);
523 }
524 octeon_mgmt_update_rx_stats(netdev);
525
526 return work_done;
527}
528
/* Reset the hardware to clean state.  */
static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
{
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_mixx_bist mix_bist;
	union cvmx_agl_gmx_bist agl_gmx_bist;

	/* Clear MIX_CTL and wait for the block to go idle before
	 * asserting its reset bit. */
	mix_ctl.u64 = 0;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	do {
		mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
	} while (mix_ctl.s.busy);
	mix_ctl.s.reset = 1;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	/* Read back to flush the write, then let the reset settle. */
	cvmx_read_csr(p->mix + MIX_CTL);
	octeon_io_clk_delay(64);

	/* Report (but do not fail on) built-in self-test errors. */
	mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
	if (mix_bist.u64)
		dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
			 (unsigned long long)mix_bist.u64);

	agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
	if (agl_gmx_bist.u64)
		dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
			 (unsigned long long)agl_gmx_bist.u64);
}
556
/* Accumulator used while building the destination-MAC CAM contents.
 * cam[i] collects byte i of every added address (the byte for entry
 * n lands at bit position 8*n); cam_mask has one bit set per valid
 * entry; cam_index is the next free entry.
 */
struct octeon_mgmt_cam_state {
	u64 cam[6];
	u64 cam_mask;
	int cam_index;
};
562
563static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
564 unsigned char *addr)
565{
566 int i;
567
568 for (i = 0; i < 6; i++)
569 cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
570 cs->cam_mask |= (1ULL << cs->cam_index);
571 cs->cam_index++;
572}
573
/* Program the destination-MAC CAM and RX address-filter mode from the
 * netdev state: promiscuous/allmulti flags, the primary address, and
 * the secondary unicast and multicast address lists.  Packet I/O is
 * briefly disabled while the filter registers are rewritten.
 */
static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
	union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
	unsigned long flags;
	unsigned int prev_packet_enable;
	unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
	unsigned int multicast_mode = 1; /* 1 - Reject all multicast.  */
	struct octeon_mgmt_cam_state cam_state;
	struct netdev_hw_addr *ha;
	int available_cam_entries;

	memset(&cam_state, 0, sizeof(cam_state));

	/* The CAM holds 8 entries; if the unicast addresses do not all
	 * fit, give up on CAM filtering entirely. */
	if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
		cam_mode = 0;
		available_cam_entries = 8;
	} else {
		/*
		 * One CAM entry for the primary address, leaves seven
		 * for the secondary addresses.
		 */
		available_cam_entries = 7 - netdev->uc.count;
	}

	if (netdev->flags & IFF_MULTICAST) {
		if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
		    netdev_mc_count(netdev) > available_cam_entries)
			multicast_mode = 2; /* 2 - Accept all multicast.  */
		else
			multicast_mode = 0; /* 0 - Use CAM.  */
	}

	if (cam_mode == 1) {
		/* Add primary address. */
		octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
		netdev_for_each_uc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}
	if (multicast_mode == 0) {
		netdev_for_each_mc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}

	spin_lock_irqsave(&p->lock, flags);

	/* Disable packet I/O. */
	agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prev_packet_enable = agl_gmx_prtx.s.en;
	agl_gmx_prtx.s.en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	adr_ctl.u64 = 0;
	adr_ctl.s.cam_mode = cam_mode;
	adr_ctl.s.mcst = multicast_mode;
	adr_ctl.s.bcst = 1;     /* Allow broadcast */

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64);

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask);

	/* Restore packet I/O. */
	agl_gmx_prtx.s.en = prev_packet_enable;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	spin_unlock_irqrestore(&p->lock, flags);
}
648
/* ndo_set_mac_address: validate/store the new address, then rewrite
 * the hardware RX filter so the CAM matches it. */
static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
{
	int rv = eth_mac_addr(netdev, addr);

	if (rv == 0)
		octeon_mgmt_set_rx_filtering(netdev);

	return rv;
}
660
661static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
662{
663 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -0700664 int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
665
666 /*
667 * Limit the MTU to make sure the ethernet packets are between
668 * 64 bytes and 16383 bytes.
669 */
670 if (size_without_fcs < 64 || size_without_fcs > 16383) {
671 dev_warn(p->dev, "MTU must be between %d and %d.\n",
672 64 - OCTEON_MGMT_RX_HEADROOM,
673 16383 - OCTEON_MGMT_RX_HEADROOM);
674 return -EINVAL;
675 }
676
677 netdev->mtu = new_mtu;
678
David Daney368bec02012-07-05 18:12:39 +0200679 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs);
680 cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
David Daneyd6aa60a2009-10-14 12:04:41 -0700681 (size_without_fcs + 7) & 0xfff8);
682
683 return 0;
684}
685
686static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
687{
688 struct net_device *netdev = dev_id;
689 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -0700690 union cvmx_mixx_isr mixx_isr;
691
David Daney368bec02012-07-05 18:12:39 +0200692 mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);
David Daneyd6aa60a2009-10-14 12:04:41 -0700693
694 /* Clear any pending interrupts */
David Daney368bec02012-07-05 18:12:39 +0200695 cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
696 cvmx_read_csr(p->mix + MIX_ISR);
David Daneyd6aa60a2009-10-14 12:04:41 -0700697
698 if (mixx_isr.s.irthresh) {
699 octeon_mgmt_disable_rx_irq(p);
700 napi_schedule(&p->napi);
701 }
702 if (mixx_isr.s.orthresh) {
703 octeon_mgmt_disable_tx_irq(p);
704 tasklet_schedule(&p->tx_clean_tasklet);
705 }
706
707 return IRQ_HANDLED;
708}
709
/* SIOCSHWTSTAMP handler: validate the requested hardware timestamping
 * configuration, bring up the PTP clock on CN6XXX parts if it is not
 * already running, and enable/disable RX timestamp insertion in the
 * AGL GMX frame control register accordingly.  Returns -EINVAL on
 * parts without timestamp hardware.
 */
static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
				      struct ifreq *rq, int cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	struct hwtstamp_config config;
	union cvmx_mio_ptp_clock_cfg ptp;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	bool have_hw_timestamps = false;

	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags) /* reserved for future extensions */
		return -EINVAL;

	/* Check the status of hardware for timestamps */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		/* Get the current state of the PTP clock */
		ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
		if (!ptp.s.ext_clk_en) {
			/* The clock has not been configured to use an
			 * external source.  Program it to use the main clock
			 * reference.
			 */
			u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate();
			if (!ptp.s.ptp_en)
				cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
			pr_info("PTP Clock: Using sclk reference at %lld Hz\n",
				(NSEC_PER_SEC << 32) / clock_comp);
		} else {
			/* The clock is already programmed to use a GPIO */
			u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP);
			pr_info("PTP Clock: Using GPIO %d at %lld Hz\n",
				ptp.s.ext_clk_in,
				(NSEC_PER_SEC << 32) / clock_comp);
		}

		/* Enable the clock if it wasn't done already */
		if (!ptp.s.ptp_en) {
			ptp.s.ptp_en = 1;
			cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
		}
		have_hw_timestamps = true;
	}

	if (!have_hw_timestamps)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		/* Turn off RX timestamp insertion. */
		p->has_rx_tstamp = false;
		rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
		rxx_frm_ctl.s.ptp_mode = 0;
		cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		/* The hardware timestamps every frame, so any narrower
		 * requested filter is widened to HWTSTAMP_FILTER_ALL. */
		p->has_rx_tstamp = have_hw_timestamps;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		if (p->has_rx_tstamp) {
			rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
			rxx_frm_ctl.s.ptp_mode = 1;
			cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
		}
		break;
	default:
		return -ERANGE;
	}

	if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}
804
David Daneyd6aa60a2009-10-14 12:04:41 -0700805static int octeon_mgmt_ioctl(struct net_device *netdev,
806 struct ifreq *rq, int cmd)
807{
808 struct octeon_mgmt *p = netdev_priv(netdev);
809
Chad Reese3d305852012-08-21 11:45:07 -0700810 switch (cmd) {
811 case SIOCSHWTSTAMP:
812 return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
813 default:
814 if (p->phydev)
815 return phy_mii_ioctl(p->phydev, rq, cmd);
David Daneyd6aa60a2009-10-14 12:04:41 -0700816 return -EINVAL;
Chad Reese3d305852012-08-21 11:45:07 -0700817 }
David Daneyd6aa60a2009-10-14 12:04:41 -0700818}
819
David Daneyeeae05a2012-08-21 11:45:06 -0700820static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
821{
822 union cvmx_agl_gmx_prtx_cfg prtx_cfg;
823
824 /* Disable GMX before we make any changes. */
825 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
826 prtx_cfg.s.en = 0;
827 prtx_cfg.s.tx_en = 0;
828 prtx_cfg.s.rx_en = 0;
829 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
830
831 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
832 int i;
833 for (i = 0; i < 10; i++) {
834 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
835 if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1)
836 break;
837 mdelay(1);
838 i++;
839 }
840 }
841}
842
843static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
844{
845 union cvmx_agl_gmx_prtx_cfg prtx_cfg;
846
847 /* Restore the GMX enable state only if link is set */
848 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
849 prtx_cfg.s.tx_en = 1;
850 prtx_cfg.s.rx_en = 1;
851 prtx_cfg.s.en = 1;
852 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
853}
854
/* Program the AGL GMX port configuration (speed, duplex, slot time,
 * burst) to match the current PHY state, and on CN6XXX also set the
 * TX clock divider for the negotiated speed.  The caller is expected
 * to have disabled the port (octeon_mgmt_disable_link) first.
 */
static void octeon_mgmt_update_link(struct octeon_mgmt *p)
{
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

	/* With no link, park the port in full-duplex. */
	if (!p->phydev->link)
		prtx_cfg.s.duplex = 1;
	else
		prtx_cfg.s.duplex = p->phydev->duplex;

	switch (p->phydev->speed) {
	case 10:
		prtx_cfg.s.speed = 0;
		prtx_cfg.s.slottime = 0;

		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.burst = 1;
			prtx_cfg.s.speed_msb = 1;
		}
		break;
	case 100:
		prtx_cfg.s.speed = 0;
		prtx_cfg.s.slottime = 0;

		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.burst = 1;
			prtx_cfg.s.speed_msb = 0;
		}
		break;
	case 1000:
		/* 1000 MBits is only supported on 6XXX chips */
		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.speed = 1;
			prtx_cfg.s.speed_msb = 0;
			/* Only matters for half-duplex */
			prtx_cfg.s.slottime = 1;
			prtx_cfg.s.burst = p->phydev->duplex;
		}
		break;
	case 0: /* No link */
	default:
		break;
	}

	/* Write the new GMX setting with the port still disabled. */
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	/* Read GMX CFG again to make sure the config is completed. */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		union cvmx_agl_gmx_txx_clk agl_clk;
		union cvmx_agl_prtx_ctl prtx_ctl;

		prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
		/* MII (both speeds) and RGMII 1000 speed. */
		agl_clk.s.clk_cnt = 1;
		if (prtx_ctl.s.mode == 0) { /* RGMII mode */
			if (p->phydev->speed == 10)
				agl_clk.s.clk_cnt = 50;
			else if (p->phydev->speed == 100)
				agl_clk.s.clk_cnt = 5;
		}
		cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
	}
}
923
David Daneyd6aa60a2009-10-14 12:04:41 -0700924static void octeon_mgmt_adjust_link(struct net_device *netdev)
925{
926 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -0700927 unsigned long flags;
928 int link_changed = 0;
929
David Daneyeeae05a2012-08-21 11:45:06 -0700930 if (!p->phydev)
931 return;
932
David Daneyd6aa60a2009-10-14 12:04:41 -0700933 spin_lock_irqsave(&p->lock, flags);
David Daneyeeae05a2012-08-21 11:45:06 -0700934
935
936 if (!p->phydev->link && p->last_link)
937 link_changed = -1;
938
939 if (p->phydev->link
940 && (p->last_duplex != p->phydev->duplex
941 || p->last_link != p->phydev->link
942 || p->last_speed != p->phydev->speed)) {
943 octeon_mgmt_disable_link(p);
944 link_changed = 1;
945 octeon_mgmt_update_link(p);
946 octeon_mgmt_enable_link(p);
David Daneyd6aa60a2009-10-14 12:04:41 -0700947 }
David Daneyeeae05a2012-08-21 11:45:06 -0700948
David Daneyd6aa60a2009-10-14 12:04:41 -0700949 p->last_link = p->phydev->link;
David Daneyeeae05a2012-08-21 11:45:06 -0700950 p->last_speed = p->phydev->speed;
951 p->last_duplex = p->phydev->duplex;
952
David Daneyd6aa60a2009-10-14 12:04:41 -0700953 spin_unlock_irqrestore(&p->lock, flags);
954
955 if (link_changed != 0) {
956 if (link_changed > 0) {
David Daneyd6aa60a2009-10-14 12:04:41 -0700957 pr_info("%s: Link is up - %d/%s\n", netdev->name,
958 p->phydev->speed,
959 DUPLEX_FULL == p->phydev->duplex ?
960 "Full" : "Half");
961 } else {
David Daneyd6aa60a2009-10-14 12:04:41 -0700962 pr_info("%s: Link is down\n", netdev->name);
963 }
964 }
965}
966
967static int octeon_mgmt_init_phy(struct net_device *netdev)
968{
969 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -0700970
David Daney368bec02012-07-05 18:12:39 +0200971 if (octeon_is_simulation() || p->phy_np == NULL) {
David Daneyd6aa60a2009-10-14 12:04:41 -0700972 /* No PHYs in the simulator. */
973 netif_carrier_on(netdev);
974 return 0;
975 }
976
David Daney368bec02012-07-05 18:12:39 +0200977 p->phydev = of_phy_connect(netdev, p->phy_np,
978 octeon_mgmt_adjust_link, 0,
979 PHY_INTERFACE_MODE_MII);
David Daneyd6aa60a2009-10-14 12:04:41 -0700980
David Daneyeeae05a2012-08-21 11:45:06 -0700981 if (p->phydev == NULL)
982 return -ENODEV;
David Daneyd6aa60a2009-10-14 12:04:41 -0700983
984 return 0;
985}
986
/* ndo_open: bring the management port up.
 *
 * Allocates and DMA-maps the TX/RX descriptor rings, takes the MIX
 * block out of reset, applies per-model AGL fixups, points the
 * hardware at the rings, programs MAC address/MTU, attaches the PHY,
 * configures RGMII/MII mode on CN6XXX, clears statistics, requests
 * the interrupt, sets interrupt watermarks and RX frame checks, and
 * finally enables the link, the TX queue and NAPI.
 *
 * Returns 0 on success, -ENOMEM on any failure (all failures unwind
 * through the err_noirq/err_nomem paths at the bottom).
 */
static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Allocate ring buffers. */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->tx_ring)
		return -ENOMEM;
	/* NOTE(review): dma_map_single() results are never checked with
	 * dma_mapping_error() here — confirm this is safe on Octeon.
	 */
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;


	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->rx_ring)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);

	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);

	/* Bring it out of reset if needed. */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
		/* Busy-wait for the hardware to clear the reset bit. */
		do {
			mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
		} while (mix_ctl.s.reset);
	}

	/* CN5XXX parts need the AGL interface explicitly enabled. */
	if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
		agl_gmx_inf_mode.u64 = 0;
		agl_gmx_inf_mode.s.en = 1;
		cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
	}
	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/*
		 * Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
		if (p->port) {
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
		} else {
			drv_ctl.s.byp_en = 1;
			drv_ctl.s.nctl = 6;
			drv_ctl.s.pctl = 6;
		}
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
	}

	/* Point the hardware at the DMA-mapped rings (8-byte units). */
	oring1.u64 = 0;
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);

	/* Push the current MAC address and MTU into the hardware. */
	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/*
	 * Enable the port HW. Packets are not allowed until
	 * cvmx_mgmt_port_enable() is called.
	 */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */
	mix_ctl.s.en = 1; /* Enable the port */
	mix_ctl.s.nbtarb = 0; /* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
#ifdef __LITTLE_ENDIAN
	mix_ctl.s.lendian = 1;
#endif
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);

	/* Read the PHY to find the mode of the interface. */
	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
		goto err_noirq;
	}

	/* Set the mode of the interface, RGMII/MII. */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) {
		union cvmx_agl_prtx_ctl agl_prtx_ctl;
		/* RGMII if the PHY advertises any gigabit capability. */
		int rgmii_mode = (p->phydev->supported &
				  (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;

		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

		/* MII clocks counts are based on the 125Mhz
		 * reference, which has an 8nS period. So our delays
		 * need to be multiplied by this factor.
		 */
#define NS_PER_PHY_CLK 8

		/* Take the DLL and clock tree out of reset */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.clkrst = 0;
		if (rgmii_mode) {
			agl_prtx_ctl.s.dllrst = 0;
			agl_prtx_ctl.s.clktx_byp = 0;
		}
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
		cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */

		/* Wait for the DLL to lock. External 125 MHz
		 * reference clock must be stable at this point.
		 */
		ndelay(256 * NS_PER_PHY_CLK);

		/* Enable the interface */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.enable = 1;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

		/* Read the value back to force the previous write */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);

		/* Enable the compensation controller */
		agl_prtx_ctl.s.comp = 1;
		agl_prtx_ctl.s.drv_byp = 0;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
		/* Force write out before wait. */
		cvmx_read_csr(p->agl_prt_ctl);

		/* For compensation state to lock. */
		ndelay(1040 * NS_PER_PHY_CLK);

		/* Some Ethernet switches cannot handle standard
		 * Interframe Gap, increase to 16 bytes.
		 */
		cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0x88);
	}

	octeon_mgmt_rx_fill_ring(netdev);

	/* Clear statistics. */
	/* Clear on read. */
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);

	cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));

	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
			netdev)) {
		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
		goto err_noirq;
	}

	/* Interrupt every single RX packet */
	mix_irhwm.u64 = 0;
	mix_irhwm.s.irhwm = 0;
	cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);

	/* Interrupt when we have 1 or more packets to clean. */
	mix_orhwm.u64 = 0;
	mix_orhwm.s.orhwm = 0;
	cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);

	/* Enable receive and transmit interrupts */
	mix_intena.u64 = 0;
	mix_intena.s.ithena = 1;
	mix_intena.s.othena = 1;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);

	/* Enable packet I/O. */

	rxx_frm_ctl.u64 = 0;
	/* Enable RX timestamping only when it has been requested. */
	rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
	rxx_frm_ctl.s.pre_align = 1;
	/*
	 * When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
	rxx_frm_ctl.s.pad_len = 1;
	/* When set, disables the length check for VLAN pkts */
	rxx_frm_ctl.s.vlan_len = 1;
	/* When set, PREAMBLE checking is less strict */
	rxx_frm_ctl.s.pre_free = 1;
	/* Control Pause Frames can match station SMAC */
	rxx_frm_ctl.s.ctl_smac = 0;
	/* Control Pause Frames can match globally assign Multicast address */
	rxx_frm_ctl.s.ctl_mcst = 1;
	/* Forward pause information to TX block */
	rxx_frm_ctl.s.ctl_bck = 1;
	/* Drop Control Pause Frames */
	rxx_frm_ctl.s.ctl_drp = 1;
	/* Strip off the preamble */
	rxx_frm_ctl.s.pre_strp = 1;
	/*
	 * This port is configured to send PREAMBLE+SFD to begin every
	 * frame. GMX checks that the PREAMBLE is sent correctly.
	 */
	rxx_frm_ctl.s.pre_chk = 1;
	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);

	/* Configure the port duplex, speed and enables */
	octeon_mgmt_disable_link(p);
	if (p->phydev)
		octeon_mgmt_update_link(p);
	octeon_mgmt_enable_link(p);

	p->last_link = 0;
	p->last_speed = 0;
	/* PHY is not present in simulator. The carrier is enabled
	 * while initializing the phy for simulator, leave it enabled.
	 */
	if (p->phydev) {
		netif_carrier_off(netdev);
		phy_start_aneg(p->phydev);
	}

	netif_wake_queue(netdev);
	napi_enable(&p->napi);

	return 0;
err_noirq:
	octeon_mgmt_reset_hw(p);
	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);
err_nomem:
	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);
	return -ENOMEM;
}
1258
/* ndo_stop: shut the port down, reversing octeon_mgmt_open().
 * Disables NAPI and the TX queue, detaches the PHY, resets the
 * hardware, frees the IRQ, and releases all queued skbs and the
 * DMA-mapped descriptor rings.  Always returns 0.
 */
static int octeon_mgmt_stop(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	napi_disable(&p->napi);
	netif_stop_queue(netdev);

	if (p->phydev)
		phy_disconnect(p->phydev);
	p->phydev = NULL;

	netif_carrier_off(netdev);

	/* Quiesce the hardware before tearing down the rings. */
	octeon_mgmt_reset_hw(p);

	free_irq(p->irq, netdev);

	/* dma_unmap is a nop on Octeon, so just free everything. */
	skb_queue_purge(&p->tx_list);
	skb_queue_purge(&p->rx_list);

	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);

	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);

	return 0;
}
1292
/* ndo_start_xmit: map the skb, place a descriptor in the TX ring and
 * ring the doorbell.  Returns NETDEV_TX_OK on success, NETDEV_TX_BUSY
 * when the ring is full (the skb is unmapped and left to the stack to
 * requeue).
 */
static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union mgmt_port_ring_entry re;
	unsigned long flags;
	int rv = NETDEV_TX_BUSY;

	re.d64 = 0;
	/* Ask the hardware for a TX timestamp when the skb requests one. */
	re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
	re.s.len = skb->len;
	re.s.addr = dma_map_single(p->dev, skb->data,
				   skb->len,
				   DMA_TO_DEVICE);

	spin_lock_irqsave(&p->tx_list.lock, flags);

	/* Nearly full: stop the queue.  The lock is dropped around
	 * netif_stop_queue() and the fill level re-checked below, since
	 * the cleaner may have run in between.
	 */
	if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		netif_stop_queue(netdev);
		spin_lock_irqsave(&p->tx_list.lock, flags);
	}

	/* Completely full: give the skb back to the stack. */
	if (unlikely(p->tx_current_fill >=
		     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);
		goto out;
	}

	/* Keep the skb so the cleaner can free it after transmission. */
	__skb_queue_tail(&p->tx_list, skb);

	/* Put it in the ring. */
	p->tx_ring[p->tx_next] = re.d64;
	p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
	p->tx_current_fill++;

	spin_unlock_irqrestore(&p->tx_list.lock, flags);

	/* Make the descriptor visible to the device before the doorbell. */
	dma_sync_single_for_device(p->dev, p->tx_ring_handle,
				   ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
				   DMA_BIDIRECTIONAL);

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;

	/* Ring the bell. */
	cvmx_write_csr(p->mix + MIX_ORING2, 1);

	netdev->trans_start = jiffies;
	rv = NETDEV_TX_OK;
out:
	octeon_mgmt_update_tx_stats(netdev);
	return rv;
}
1348
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polled 'interrupt' path for netpoll/netconsole: drain up to 16
 * received packets and refresh the RX statistics.
 */
static void octeon_mgmt_poll_controller(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	octeon_mgmt_receive_packets(p, 16);
	octeon_mgmt_update_rx_stats(netdev);
}
#endif
1358
1359static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
1360 struct ethtool_drvinfo *info)
1361{
1362 strncpy(info->driver, DRV_NAME, sizeof(info->driver));
1363 strncpy(info->version, DRV_VERSION, sizeof(info->version));
1364 strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
1365 strncpy(info->bus_info, "N/A", sizeof(info->bus_info));
1366 info->n_stats = 0;
1367 info->testinfo_len = 0;
1368 info->regdump_len = 0;
1369 info->eedump_len = 0;
1370}
1371
1372static int octeon_mgmt_get_settings(struct net_device *netdev,
1373 struct ethtool_cmd *cmd)
1374{
1375 struct octeon_mgmt *p = netdev_priv(netdev);
1376
1377 if (p->phydev)
1378 return phy_ethtool_gset(p->phydev, cmd);
1379
David Daneyf21105d2012-08-21 11:45:08 -07001380 return -EOPNOTSUPP;
David Daneyd6aa60a2009-10-14 12:04:41 -07001381}
1382
1383static int octeon_mgmt_set_settings(struct net_device *netdev,
1384 struct ethtool_cmd *cmd)
1385{
1386 struct octeon_mgmt *p = netdev_priv(netdev);
1387
1388 if (!capable(CAP_NET_ADMIN))
1389 return -EPERM;
1390
1391 if (p->phydev)
1392 return phy_ethtool_sset(p->phydev, cmd);
1393
David Daneyf21105d2012-08-21 11:45:08 -07001394 return -EOPNOTSUPP;
1395}
1396
1397static int octeon_mgmt_nway_reset(struct net_device *dev)
1398{
1399 struct octeon_mgmt *p = netdev_priv(dev);
1400
1401 if (!capable(CAP_NET_ADMIN))
1402 return -EPERM;
1403
1404 if (p->phydev)
1405 return phy_start_aneg(p->phydev);
1406
1407 return -EOPNOTSUPP;
David Daneyd6aa60a2009-10-14 12:04:41 -07001408}
1409
/* ethtool operations: link parameters are delegated to the attached
 * PHY where one exists (see the handlers above each returning
 * -EOPNOTSUPP without a PHY).
 */
static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
	.get_drvinfo = octeon_mgmt_get_drvinfo,
	.get_settings = octeon_mgmt_get_settings,
	.set_settings = octeon_mgmt_set_settings,
	.nway_reset = octeon_mgmt_nway_reset,
	.get_link = ethtool_op_get_link,
};
1417
/* net_device callbacks for the management port. */
static const struct net_device_ops octeon_mgmt_ops = {
	.ndo_open =			octeon_mgmt_open,
	.ndo_stop =			octeon_mgmt_stop,
	.ndo_start_xmit =		octeon_mgmt_xmit,
	.ndo_set_rx_mode =		octeon_mgmt_set_rx_filtering,
	.ndo_set_mac_address =		octeon_mgmt_set_mac_address,
	.ndo_do_ioctl =			octeon_mgmt_ioctl,
	.ndo_change_mtu =		octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =		octeon_mgmt_poll_controller,
#endif
};
1430
/* Probe one MIX/AGL management-port instance described by a device
 * tree node: allocate the net_device, read "cell-index", the IRQ and
 * the three register ranges (MIX, AGL, AGL_PRT_CTL), map them, pick
 * up the MAC address and PHY phandle from the DT, and register the
 * network interface.
 */
static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
{
	struct net_device *netdev;
	struct octeon_mgmt *p;
	const __be32 *data;
	const u8 *mac;
	struct resource *res_mix;
	struct resource *res_agl;
	struct resource *res_agl_prt_ctl;
	int len;
	int result;

	netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
	if (netdev == NULL)
		return -ENOMEM;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	dev_set_drvdata(&pdev->dev, netdev);
	p = netdev_priv(netdev);
	netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
		       OCTEON_MGMT_NAPI_WEIGHT);

	p->netdev = netdev;
	p->dev = &pdev->dev;
	p->has_rx_tstamp = false;

	/* "cell-index" selects which management port this node is. */
	data = of_get_property(pdev->dev.of_node, "cell-index", &len);
	if (data && len == sizeof(*data)) {
		p->port = be32_to_cpup(data);
	} else {
		dev_err(&pdev->dev, "no 'cell-index' property\n");
		result = -ENXIO;
		goto err;
	}

	snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);

	result = platform_get_irq(pdev, 0);
	if (result < 0)
		goto err;

	p->irq = result;

	/* Register ranges: index 0 = MIX, 1 = AGL, 3 = AGL_PRT_CTL. */
	res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res_mix == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res_agl == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
	if (res_agl_prt_ctl == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	p->mix_phys = res_mix->start;
	p->mix_size = resource_size(res_mix);
	p->agl_phys = res_agl->start;
	p->agl_size = resource_size(res_agl);
	p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
	p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);


	/* devm_* requests/mappings are released automatically on
	 * driver detach, so the error paths need no explicit unmap.
	 */
	if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
				     res_mix->name)) {
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_mix->name);
		result = -ENXIO;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
				     res_agl->name)) {
		result = -ENXIO;
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_agl->name);
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
				     p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
		result = -ENXIO;
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_agl_prt_ctl->name);
		goto err;
	}

	/* NOTE(review): the devm_ioremap() results are not checked for
	 * NULL before use — verify whether a failure here is possible.
	 */
	p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
	p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
	p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
					   p->agl_prt_ctl_size);
	spin_lock_init(&p->lock);

	skb_queue_head_init(&p->tx_list);
	skb_queue_head_init(&p->rx_list);
	tasklet_init(&p->tx_clean_tasklet,
		     octeon_mgmt_clean_tx_tasklet, (unsigned long)p);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->netdev_ops = &octeon_mgmt_ops;
	netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;

	/* Use the DT MAC address if valid, otherwise a random one. */
	mac = of_get_mac_address(pdev->dev.of_node);

	if (mac && is_valid_ether_addr(mac)) {
		memcpy(netdev->dev_addr, mac, ETH_ALEN);
		netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
	} else {
		eth_hw_addr_random(netdev);
	}

	/* NOTE(review): the phandle reference taken here is not put on
	 * the error path or in remove — possible refcount leak; verify.
	 */
	p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	netif_carrier_off(netdev);
	result = register_netdev(netdev);
	if (result)
		goto err;

	dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
	return 0;

err:
	free_netdev(netdev);
	return result;
}
1570
David Daneyd30b1812010-06-24 09:14:47 +00001571static int __devexit octeon_mgmt_remove(struct platform_device *pdev)
David Daneyd6aa60a2009-10-14 12:04:41 -07001572{
1573 struct net_device *netdev = dev_get_drvdata(&pdev->dev);
1574
1575 unregister_netdev(netdev);
1576 free_netdev(netdev);
1577 return 0;
1578}
1579
/* Device-tree match table: binds to "cavium,octeon-5750-mix" nodes. */
static struct of_device_id octeon_mgmt_match[] = {
	{
		.compatible = "cavium,octeon-5750-mix",
	},
	{},
};
MODULE_DEVICE_TABLE(of, octeon_mgmt_match);
1587
/* Platform driver glue; instances are matched via the OF table. */
static struct platform_driver octeon_mgmt_driver = {
	.driver = {
		.name = "octeon_mgmt",
		.owner = THIS_MODULE,
		.of_match_table = octeon_mgmt_match,
	},
	.probe = octeon_mgmt_probe,
	.remove = __devexit_p(octeon_mgmt_remove),
};
1597
/* Declared by the octeon mdiobus driver; calling it pulls in that
 * module so the PHY bus exists before this driver registers.
 */
extern void octeon_mdiobus_force_mod_depencency(void);

static int __init octeon_mgmt_mod_init(void)
{
	/* Force our mdiobus driver module to be loaded first. */
	octeon_mdiobus_force_mod_depencency();
	return platform_driver_register(&octeon_mgmt_driver);
}
1606
/* Module unload: unregister the platform driver. */
static void __exit octeon_mgmt_mod_exit(void)
{
	platform_driver_unregister(&octeon_mgmt_driver);
}

module_init(octeon_mgmt_mod_init);
module_exit(octeon_mgmt_mod_exit);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);