David Daneyd6aa60a2009-10-14 12:04:41 -07001/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
David Daneyeeae05a2012-08-21 11:45:06 -07006 * Copyright (C) 2009-2012 Cavium, Inc
David Daneyd6aa60a2009-10-14 12:04:41 -07007 */
8
David Daneyd6aa60a2009-10-14 12:04:41 -07009#include <linux/platform_device.h>
David Daney368bec02012-07-05 18:12:39 +020010#include <linux/dma-mapping.h>
David Daneyd6aa60a2009-10-14 12:04:41 -070011#include <linux/etherdevice.h>
David Daney368bec02012-07-05 18:12:39 +020012#include <linux/capability.h>
Chad Reese3d305852012-08-21 11:45:07 -070013#include <linux/net_tstamp.h>
David Daney368bec02012-07-05 18:12:39 +020014#include <linux/interrupt.h>
15#include <linux/netdevice.h>
16#include <linux/spinlock.h>
David Daneyd6aa60a2009-10-14 12:04:41 -070017#include <linux/if_vlan.h>
David Daney368bec02012-07-05 18:12:39 +020018#include <linux/of_mdio.h>
19#include <linux/module.h>
20#include <linux/of_net.h>
21#include <linux/init.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090022#include <linux/slab.h>
David Daneyd6aa60a2009-10-14 12:04:41 -070023#include <linux/phy.h>
David Daney368bec02012-07-05 18:12:39 +020024#include <linux/io.h>
David Daneyd6aa60a2009-10-14 12:04:41 -070025
26#include <asm/octeon/octeon.h>
27#include <asm/octeon/cvmx-mixx-defs.h>
28#include <asm/octeon/cvmx-agl-defs.h>
29
30#define DRV_NAME "octeon_mgmt"
31#define DRV_VERSION "2.0"
32#define DRV_DESCRIPTION \
33 "Cavium Networks Octeon MII (management) port Network Driver"
34
35#define OCTEON_MGMT_NAPI_WEIGHT 16
36
37/*
38 * Ring sizes that are powers of two allow for more efficient modulo
39 * operations.
40 */
41#define OCTEON_MGMT_RX_RING_SIZE 512
42#define OCTEON_MGMT_TX_RING_SIZE 128
43
44/* Allow 8 bytes for vlan and FCS. */
45#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
46
47union mgmt_port_ring_entry {
48 u64 d64;
49 struct {
50 u64 reserved_62_63:2;
51 /* Length of the buffer/packet in bytes */
52 u64 len:14;
53 /* For TX, signals that the packet should be timestamped */
54 u64 tstamp:1;
55 /* The RX error code */
56 u64 code:7;
57#define RING_ENTRY_CODE_DONE 0xf
58#define RING_ENTRY_CODE_MORE 0x10
59 /* Physical address of the buffer */
60 u64 addr:40;
61 } s;
62};
63
David Daney368bec02012-07-05 18:12:39 +020064#define MIX_ORING1 0x0
65#define MIX_ORING2 0x8
66#define MIX_IRING1 0x10
67#define MIX_IRING2 0x18
68#define MIX_CTL 0x20
69#define MIX_IRHWM 0x28
70#define MIX_IRCNT 0x30
71#define MIX_ORHWM 0x38
72#define MIX_ORCNT 0x40
73#define MIX_ISR 0x48
74#define MIX_INTENA 0x50
75#define MIX_REMCNT 0x58
76#define MIX_BIST 0x78
77
78#define AGL_GMX_PRT_CFG 0x10
79#define AGL_GMX_RX_FRM_CTL 0x18
80#define AGL_GMX_RX_FRM_MAX 0x30
81#define AGL_GMX_RX_JABBER 0x38
82#define AGL_GMX_RX_STATS_CTL 0x50
83
84#define AGL_GMX_RX_STATS_PKTS_DRP 0xb0
85#define AGL_GMX_RX_STATS_OCTS_DRP 0xb8
86#define AGL_GMX_RX_STATS_PKTS_BAD 0xc0
87
88#define AGL_GMX_RX_ADR_CTL 0x100
89#define AGL_GMX_RX_ADR_CAM_EN 0x108
90#define AGL_GMX_RX_ADR_CAM0 0x180
91#define AGL_GMX_RX_ADR_CAM1 0x188
92#define AGL_GMX_RX_ADR_CAM2 0x190
93#define AGL_GMX_RX_ADR_CAM3 0x198
94#define AGL_GMX_RX_ADR_CAM4 0x1a0
95#define AGL_GMX_RX_ADR_CAM5 0x1a8
96
David Daneyeeae05a2012-08-21 11:45:06 -070097#define AGL_GMX_TX_CLK 0x208
David Daney368bec02012-07-05 18:12:39 +020098#define AGL_GMX_TX_STATS_CTL 0x268
99#define AGL_GMX_TX_CTL 0x270
100#define AGL_GMX_TX_STAT0 0x280
101#define AGL_GMX_TX_STAT1 0x288
102#define AGL_GMX_TX_STAT2 0x290
103#define AGL_GMX_TX_STAT3 0x298
104#define AGL_GMX_TX_STAT4 0x2a0
105#define AGL_GMX_TX_STAT5 0x2a8
106#define AGL_GMX_TX_STAT6 0x2b0
107#define AGL_GMX_TX_STAT7 0x2b8
108#define AGL_GMX_TX_STAT8 0x2c0
109#define AGL_GMX_TX_STAT9 0x2c8
110
David Daneyd6aa60a2009-10-14 12:04:41 -0700111struct octeon_mgmt {
112 struct net_device *netdev;
David Daney368bec02012-07-05 18:12:39 +0200113 u64 mix;
114 u64 agl;
David Daneyeeae05a2012-08-21 11:45:06 -0700115 u64 agl_prt_ctl;
David Daneyd6aa60a2009-10-14 12:04:41 -0700116 int port;
117 int irq;
Chad Reese3d305852012-08-21 11:45:07 -0700118 bool has_rx_tstamp;
David Daneyd6aa60a2009-10-14 12:04:41 -0700119 u64 *tx_ring;
120 dma_addr_t tx_ring_handle;
121 unsigned int tx_next;
122 unsigned int tx_next_clean;
123 unsigned int tx_current_fill;
124 /* The tx_list lock also protects the ring related variables */
125 struct sk_buff_head tx_list;
126
127 /* RX variables only touched in napi_poll. No locking necessary. */
128 u64 *rx_ring;
129 dma_addr_t rx_ring_handle;
130 unsigned int rx_next;
131 unsigned int rx_next_fill;
132 unsigned int rx_current_fill;
133 struct sk_buff_head rx_list;
134
135 spinlock_t lock;
136 unsigned int last_duplex;
137 unsigned int last_link;
David Daneyeeae05a2012-08-21 11:45:06 -0700138 unsigned int last_speed;
David Daneyd6aa60a2009-10-14 12:04:41 -0700139 struct device *dev;
140 struct napi_struct napi;
141 struct tasklet_struct tx_clean_tasklet;
142 struct phy_device *phydev;
David Daney368bec02012-07-05 18:12:39 +0200143 struct device_node *phy_np;
144 resource_size_t mix_phys;
145 resource_size_t mix_size;
146 resource_size_t agl_phys;
147 resource_size_t agl_size;
David Daneyeeae05a2012-08-21 11:45:06 -0700148 resource_size_t agl_prt_ctl_phys;
149 resource_size_t agl_prt_ctl_size;
David Daneyd6aa60a2009-10-14 12:04:41 -0700150};
151
152static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
153{
David Daneyd6aa60a2009-10-14 12:04:41 -0700154 union cvmx_mixx_intena mix_intena;
155 unsigned long flags;
156
157 spin_lock_irqsave(&p->lock, flags);
David Daney368bec02012-07-05 18:12:39 +0200158 mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
David Daneyd6aa60a2009-10-14 12:04:41 -0700159 mix_intena.s.ithena = enable ? 1 : 0;
David Daney368bec02012-07-05 18:12:39 +0200160 cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -0700161 spin_unlock_irqrestore(&p->lock, flags);
162}
163
164static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
165{
David Daneyd6aa60a2009-10-14 12:04:41 -0700166 union cvmx_mixx_intena mix_intena;
167 unsigned long flags;
168
169 spin_lock_irqsave(&p->lock, flags);
David Daney368bec02012-07-05 18:12:39 +0200170 mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
David Daneyd6aa60a2009-10-14 12:04:41 -0700171 mix_intena.s.othena = enable ? 1 : 0;
David Daney368bec02012-07-05 18:12:39 +0200172 cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -0700173 spin_unlock_irqrestore(&p->lock, flags);
174}
175
176static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
177{
178 octeon_mgmt_set_rx_irq(p, 1);
179}
180
181static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
182{
183 octeon_mgmt_set_rx_irq(p, 0);
184}
185
186static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
187{
188 octeon_mgmt_set_tx_irq(p, 1);
189}
190
191static inline void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
192{
193 octeon_mgmt_set_tx_irq(p, 0);
194}
195
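/* Leave eight entries of slack so the hardware ring can never be
 * completely filled.
 */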
196static unsigned int ring_max_fill(unsigned int ring_size)
197{
198 return ring_size - 8;
199}
200
201static unsigned int ring_size_to_bytes(unsigned int ring_size)
202{
203 return ring_size * sizeof(union mgmt_port_ring_entry);
204}
205
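/* Refill the RX ring with newly allocated skbs and tell the hardware how
 * many new entries are available.
 */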
206static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
207{
208 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -0700209
210 while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
211 unsigned int size;
212 union mgmt_port_ring_entry re;
213 struct sk_buff *skb;
214
215 /* CN56XX pass 1 needs 8 bytes of padding. */
216 size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;
217
218 skb = netdev_alloc_skb(netdev, size);
219 if (!skb)
220 break;
221 skb_reserve(skb, NET_IP_ALIGN);
222 __skb_queue_tail(&p->rx_list, skb);
223
224 re.d64 = 0;
225 re.s.len = size;
226 re.s.addr = dma_map_single(p->dev, skb->data,
227 size,
228 DMA_FROM_DEVICE);
229
230 /* Put it in the ring. */
231 p->rx_ring[p->rx_next_fill] = re.d64;
232 dma_sync_single_for_device(p->dev, p->rx_ring_handle,
233 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
234 DMA_BIDIRECTIONAL);
235 p->rx_next_fill =
236 (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
237 p->rx_current_fill++;
238 /* Ring the bell. */
David Daney368bec02012-07-05 18:12:39 +0200239 cvmx_write_csr(p->mix + MIX_IRING2, 1);
David Daneyd6aa60a2009-10-14 12:04:41 -0700240 }
241}
242
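/* Convert a raw PTP clock value to kernel ktime by sampling the system time
 * and the PTP clock back to back and applying the resulting offset.
 */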
Chad Reese3d305852012-08-21 11:45:07 -0700243static ktime_t ptp_to_ktime(u64 ptptime)
244{
245 ktime_t ktimebase;
246 u64 ptpbase;
247 unsigned long flags;
248
249 local_irq_save(flags);
250 /* Fill the icache with the code */
251 ktime_get_real();
252 /* Flush all pending operations */
253 mb();
254 /* Read the system time and the PTP clock as close together as
255 * possible. It is important that this sequence always takes the
256 * same amount of time, to reduce jitter.
257 */
258 ktimebase = ktime_get_real();
259 ptpbase = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_HI);
260 local_irq_restore(flags);
261
262 return ktime_sub_ns(ktimebase, ptpbase - ptptime);
263}
264
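/* Reclaim completed TX ring entries: unmap the buffers, report any hardware
 * TX timestamps, free the skbs and wake the queue if it was stopped.
 */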
David Daneyd6aa60a2009-10-14 12:04:41 -0700265static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
266{
David Daneyd6aa60a2009-10-14 12:04:41 -0700267 union cvmx_mixx_orcnt mix_orcnt;
268 union mgmt_port_ring_entry re;
269 struct sk_buff *skb;
270 int cleaned = 0;
271 unsigned long flags;
272
David Daney368bec02012-07-05 18:12:39 +0200273 mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
David Daneyd6aa60a2009-10-14 12:04:41 -0700274 while (mix_orcnt.s.orcnt) {
David Daney4d30b802010-05-05 13:03:09 +0000275 spin_lock_irqsave(&p->tx_list.lock, flags);
276
David Daney368bec02012-07-05 18:12:39 +0200277 mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
David Daney4d30b802010-05-05 13:03:09 +0000278
279 if (mix_orcnt.s.orcnt == 0) {
280 spin_unlock_irqrestore(&p->tx_list.lock, flags);
281 break;
282 }
283
David Daneyd6aa60a2009-10-14 12:04:41 -0700284 dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
285 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
286 DMA_BIDIRECTIONAL);
287
David Daneyd6aa60a2009-10-14 12:04:41 -0700288 re.d64 = p->tx_ring[p->tx_next_clean];
289 p->tx_next_clean =
290 (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
291 skb = __skb_dequeue(&p->tx_list);
292
293 mix_orcnt.u64 = 0;
294 mix_orcnt.s.orcnt = 1;
295
296 /* Acknowledge to hardware that we have the buffer. */
David Daney368bec02012-07-05 18:12:39 +0200297 cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -0700298 p->tx_current_fill--;
299
300 spin_unlock_irqrestore(&p->tx_list.lock, flags);
301
302 dma_unmap_single(p->dev, re.s.addr, re.s.len,
303 DMA_TO_DEVICE);
Chad Reese3d305852012-08-21 11:45:07 -0700304
305 /* Read the hardware TX timestamp if one was recorded */
306 if (unlikely(re.s.tstamp)) {
307 struct skb_shared_hwtstamps ts;
308 /* Read the timestamp */
309 u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
310 /* Remove the timestamp from the FIFO */
311 cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
312 /* Tell the kernel about the timestamp */
313 ts.syststamp = ptp_to_ktime(ns);
314 ts.hwtstamp = ns_to_ktime(ns);
315 skb_tstamp_tx(skb, &ts);
316 }
317
David Daneyd6aa60a2009-10-14 12:04:41 -0700318 dev_kfree_skb_any(skb);
319 cleaned++;
320
David Daney368bec02012-07-05 18:12:39 +0200321 mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
David Daneyd6aa60a2009-10-14 12:04:41 -0700322 }
323
324 if (cleaned && netif_queue_stopped(p->netdev))
325 netif_wake_queue(p->netdev);
326}
327
328static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
329{
330 struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
331 octeon_mgmt_clean_tx_buffers(p);
332 octeon_mgmt_enable_tx_irq(p);
333}
334
335static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
336{
337 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -0700338 unsigned long flags;
339 u64 drop, bad;
340
341 /* These reads also clear the count registers. */
David Daney368bec02012-07-05 18:12:39 +0200342 drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
343 bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);
David Daneyd6aa60a2009-10-14 12:04:41 -0700344
345 if (drop || bad) {
346 /* Do an atomic update. */
347 spin_lock_irqsave(&p->lock, flags);
348 netdev->stats.rx_errors += bad;
349 netdev->stats.rx_dropped += drop;
350 spin_unlock_irqrestore(&p->lock, flags);
351 }
352}
353
354static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
355{
356 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -0700357 unsigned long flags;
358
359 union cvmx_agl_gmx_txx_stat0 s0;
360 union cvmx_agl_gmx_txx_stat1 s1;
361
362 /* These reads also clear the count registers. */
David Daney368bec02012-07-05 18:12:39 +0200363 s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0);
364 s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1);
David Daneyd6aa60a2009-10-14 12:04:41 -0700365
366 if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
367 /* Do an atomic update. */
368 spin_lock_irqsave(&p->lock, flags);
369 netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
370 netdev->stats.collisions += s1.s.scol + s1.s.mcol;
371 spin_unlock_irqrestore(&p->lock, flags);
372 }
373}
374
375/*
376 * Dequeue a receive skb and its corresponding ring entry. The ring
377 * entry is returned; *pskb is updated to point to the skb.
378 */
379static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
380 struct sk_buff **pskb)
381{
382 union mgmt_port_ring_entry re;
383
384 dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
385 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
386 DMA_BIDIRECTIONAL);
387
388 re.d64 = p->rx_ring[p->rx_next];
389 p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
390 p->rx_current_fill--;
391 *pskb = __skb_dequeue(&p->rx_list);
392
393 dma_unmap_single(p->dev, re.s.addr,
394 ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
395 DMA_FROM_DEVICE);
396
397 return re.d64;
398}
399
400
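/* Process one RX ring entry: deliver good frames to the stack (reassembling
 * frames split across buffers), drop errored ones, and acknowledge the entry
 * to the hardware. Returns 0 when a packet was delivered.
 */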
401static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
402{
David Daneyd6aa60a2009-10-14 12:04:41 -0700403 struct net_device *netdev = p->netdev;
404 union cvmx_mixx_ircnt mix_ircnt;
405 union mgmt_port_ring_entry re;
406 struct sk_buff *skb;
407 struct sk_buff *skb2;
408 struct sk_buff *skb_new;
409 union mgmt_port_ring_entry re2;
410 int rc = 1;
411
412
413 re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
414 if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
415 /* A good packet, send it up. */
416 skb_put(skb, re.s.len);
417good:
Chad Reese3d305852012-08-21 11:45:07 -0700418 /* Process the RX timestamp if it was recorded */
419 if (p->has_rx_tstamp) {
420 /* The first 8 bytes are the timestamp */
421 u64 ns = *(u64 *)skb->data;
422 struct skb_shared_hwtstamps *ts;
423 ts = skb_hwtstamps(skb);
424 ts->hwtstamp = ns_to_ktime(ns);
425 ts->syststamp = ptp_to_ktime(ns);
426 __skb_pull(skb, 8);
427 }
David Daneyd6aa60a2009-10-14 12:04:41 -0700428 skb->protocol = eth_type_trans(skb, netdev);
429 netdev->stats.rx_packets++;
430 netdev->stats.rx_bytes += skb->len;
David Daneyd6aa60a2009-10-14 12:04:41 -0700431 netif_receive_skb(skb);
432 rc = 0;
433 } else if (re.s.code == RING_ENTRY_CODE_MORE) {
434 /*
435 * Packet split across skbs. This can happen if we
436 * increase the MTU. Buffers that are already in the
437 * rx ring can then end up being too small. As the rx
438 * ring is refilled, buffers sized for the new MTU
439 * will be used and we should go back to the normal
440 * non-split case.
441 */
442 skb_put(skb, re.s.len);
443 do {
444 re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
445 if (re2.s.code != RING_ENTRY_CODE_MORE
446 && re2.s.code != RING_ENTRY_CODE_DONE)
447 goto split_error;
448 skb_put(skb2, re2.s.len);
449 skb_new = skb_copy_expand(skb, 0, skb2->len,
450 GFP_ATOMIC);
451 if (!skb_new)
452 goto split_error;
453 if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
454 skb2->len))
455 goto split_error;
456 skb_put(skb_new, skb2->len);
457 dev_kfree_skb_any(skb);
458 dev_kfree_skb_any(skb2);
459 skb = skb_new;
460 } while (re2.s.code == RING_ENTRY_CODE_MORE);
461 goto good;
462 } else {
463 /* Some other error, discard it. */
464 dev_kfree_skb_any(skb);
465 /*
466 * Error statistics are accumulated in
467 * octeon_mgmt_update_rx_stats.
468 */
469 }
470 goto done;
471split_error:
472 /* Discard the whole mess. */
473 dev_kfree_skb_any(skb);
474 dev_kfree_skb_any(skb2);
475 while (re2.s.code == RING_ENTRY_CODE_MORE) {
476 re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
477 dev_kfree_skb_any(skb2);
478 }
479 netdev->stats.rx_errors++;
480
481done:
482 /* Tell the hardware we processed a packet. */
483 mix_ircnt.u64 = 0;
484 mix_ircnt.s.ircnt = 1;
David Daney368bec02012-07-05 18:12:39 +0200485 cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -0700486 return rc;
David Daneyd6aa60a2009-10-14 12:04:41 -0700487}
488
489static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
490{
David Daneyd6aa60a2009-10-14 12:04:41 -0700491 unsigned int work_done = 0;
492 union cvmx_mixx_ircnt mix_ircnt;
493 int rc;
494
David Daney368bec02012-07-05 18:12:39 +0200495 mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
David Daneyd6aa60a2009-10-14 12:04:41 -0700496 while (work_done < budget && mix_ircnt.s.ircnt) {
497
498 rc = octeon_mgmt_receive_one(p);
499 if (!rc)
500 work_done++;
501
502 /* Check for more packets. */
David Daney368bec02012-07-05 18:12:39 +0200503 mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
David Daneyd6aa60a2009-10-14 12:04:41 -0700504 }
505
506 octeon_mgmt_rx_fill_ring(p->netdev);
507
508 return work_done;
509}
510
511static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
512{
513 struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
514 struct net_device *netdev = p->netdev;
515 unsigned int work_done = 0;
516
517 work_done = octeon_mgmt_receive_packets(p, budget);
518
519 if (work_done < budget) {
520 /* We stopped because no more packets were available. */
521 napi_complete(napi);
522 octeon_mgmt_enable_rx_irq(p);
523 }
524 octeon_mgmt_update_rx_stats(netdev);
525
526 return work_done;
527}
528
529/* Reset the hardware to clean state. */
530static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
531{
532 union cvmx_mixx_ctl mix_ctl;
533 union cvmx_mixx_bist mix_bist;
534 union cvmx_agl_gmx_bist agl_gmx_bist;
535
536 mix_ctl.u64 = 0;
David Daney368bec02012-07-05 18:12:39 +0200537 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -0700538 do {
David Daney368bec02012-07-05 18:12:39 +0200539 mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
David Daneyd6aa60a2009-10-14 12:04:41 -0700540 } while (mix_ctl.s.busy);
541 mix_ctl.s.reset = 1;
David Daney368bec02012-07-05 18:12:39 +0200542 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
543 cvmx_read_csr(p->mix + MIX_CTL);
David Daneyeeae05a2012-08-21 11:45:06 -0700544 octeon_io_clk_delay(64);
David Daneyd6aa60a2009-10-14 12:04:41 -0700545
David Daney368bec02012-07-05 18:12:39 +0200546 mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
David Daneyd6aa60a2009-10-14 12:04:41 -0700547 if (mix_bist.u64)
548 dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
549 (unsigned long long)mix_bist.u64);
550
551 agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
552 if (agl_gmx_bist.u64)
553 dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
554 (unsigned long long)agl_gmx_bist.u64);
555}
556
557struct octeon_mgmt_cam_state {
558 u64 cam[6];
559 u64 cam_mask;
560 int cam_index;
561};
562
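/* Add one MAC address to the CAM state: one byte per CAM register at the
 * current CAM index, and mark that index as valid in the mask.
 */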
563static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
564 unsigned char *addr)
565{
566 int i;
567
568 for (i = 0; i < 6; i++)
569 cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
570 cs->cam_mask |= (1ULL << cs->cam_index);
571 cs->cam_index++;
572}
573
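/* Rebuild the RX address filter: program the CAM from the unicast and
 * multicast lists, or fall back to accepting all unicast/multicast traffic
 * when in promiscuous mode or when the lists do not fit in the CAM.
 */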
574static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
575{
576 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -0700577 union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
578 union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
579 unsigned long flags;
580 unsigned int prev_packet_enable;
581 unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
582 unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
583 struct octeon_mgmt_cam_state cam_state;
Jiri Pirko22bedad32010-04-01 21:22:57 +0000584 struct netdev_hw_addr *ha;
David Daneyd6aa60a2009-10-14 12:04:41 -0700585 int available_cam_entries;
586
587 memset(&cam_state, 0, sizeof(cam_state));
588
David Daney62538d22010-05-05 13:03:08 +0000589 if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
David Daneyd6aa60a2009-10-14 12:04:41 -0700590 cam_mode = 0;
591 available_cam_entries = 8;
592 } else {
593 /*
594 * One CAM entry is used for the primary address, leaving seven
595 * for the secondary addresses.
596 */
David Daney62538d22010-05-05 13:03:08 +0000597 available_cam_entries = 7 - netdev->uc.count;
David Daneyd6aa60a2009-10-14 12:04:41 -0700598 }
599
600 if (netdev->flags & IFF_MULTICAST) {
Jiri Pirko4cd24ea2010-02-08 04:30:35 +0000601 if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
602 netdev_mc_count(netdev) > available_cam_entries)
David Daney62538d22010-05-05 13:03:08 +0000603 multicast_mode = 2; /* 2 - Accept all multicast. */
David Daneyd6aa60a2009-10-14 12:04:41 -0700604 else
605 multicast_mode = 0; /* 0 - Use CAM. */
606 }
607
608 if (cam_mode == 1) {
609 /* Add primary address. */
610 octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
David Daney62538d22010-05-05 13:03:08 +0000611 netdev_for_each_uc_addr(ha, netdev)
612 octeon_mgmt_cam_state_add(&cam_state, ha->addr);
David Daneyd6aa60a2009-10-14 12:04:41 -0700613 }
614 if (multicast_mode == 0) {
Jiri Pirko22bedad32010-04-01 21:22:57 +0000615 netdev_for_each_mc_addr(ha, netdev)
616 octeon_mgmt_cam_state_add(&cam_state, ha->addr);
David Daneyd6aa60a2009-10-14 12:04:41 -0700617 }
618
David Daneyd6aa60a2009-10-14 12:04:41 -0700619 spin_lock_irqsave(&p->lock, flags);
620
621 /* Disable packet I/O. */
David Daney368bec02012-07-05 18:12:39 +0200622 agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
David Daneyd6aa60a2009-10-14 12:04:41 -0700623 prev_packet_enable = agl_gmx_prtx.s.en;
624 agl_gmx_prtx.s.en = 0;
David Daney368bec02012-07-05 18:12:39 +0200625 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -0700626
David Daneyd6aa60a2009-10-14 12:04:41 -0700627 adr_ctl.u64 = 0;
628 adr_ctl.s.cam_mode = cam_mode;
629 adr_ctl.s.mcst = multicast_mode;
630 adr_ctl.s.bcst = 1; /* Allow broadcast */
631
David Daney368bec02012-07-05 18:12:39 +0200632 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -0700633
David Daney368bec02012-07-05 18:12:39 +0200634 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]);
635 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]);
636 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]);
637 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]);
638 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]);
639 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]);
640 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask);
David Daneyd6aa60a2009-10-14 12:04:41 -0700641
642 /* Restore packet I/O. */
643 agl_gmx_prtx.s.en = prev_packet_enable;
David Daney368bec02012-07-05 18:12:39 +0200644 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -0700645
646 spin_unlock_irqrestore(&p->lock, flags);
647}
648
649static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
650{
651 struct sockaddr *sa = addr;
652
653 if (!is_valid_ether_addr(sa->sa_data))
654 return -EADDRNOTAVAIL;
655
656 memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);
657
658 octeon_mgmt_set_rx_filtering(netdev);
659
660 return 0;
661}
662
663static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
664{
665 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -0700666 int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
667
668 /*
669 * Limit the MTU to make sure the ethernet packets are between
670 * 64 bytes and 16383 bytes.
671 */
672 if (size_without_fcs < 64 || size_without_fcs > 16383) {
673 dev_warn(p->dev, "MTU must be between %d and %d.\n",
674 64 - OCTEON_MGMT_RX_HEADROOM,
675 16383 - OCTEON_MGMT_RX_HEADROOM);
676 return -EINVAL;
677 }
678
679 netdev->mtu = new_mtu;
680
David Daney368bec02012-07-05 18:12:39 +0200681 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs);
682 cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
David Daneyd6aa60a2009-10-14 12:04:41 -0700683 (size_without_fcs + 7) & 0xfff8);
684
685 return 0;
686}
687
688static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
689{
690 struct net_device *netdev = dev_id;
691 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -0700692 union cvmx_mixx_isr mixx_isr;
693
David Daney368bec02012-07-05 18:12:39 +0200694 mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);
David Daneyd6aa60a2009-10-14 12:04:41 -0700695
696 /* Clear any pending interrupts */
David Daney368bec02012-07-05 18:12:39 +0200697 cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
698 cvmx_read_csr(p->mix + MIX_ISR);
David Daneyd6aa60a2009-10-14 12:04:41 -0700699
700 if (mixx_isr.s.irthresh) {
701 octeon_mgmt_disable_rx_irq(p);
702 napi_schedule(&p->napi);
703 }
704 if (mixx_isr.s.orthresh) {
705 octeon_mgmt_disable_tx_irq(p);
706 tasklet_schedule(&p->tx_clean_tasklet);
707 }
708
709 return IRQ_HANDLED;
710}
711
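/* SIOCSHWTSTAMP handler: enable the PTP clock when available and configure
 * RX/TX hardware timestamping according to the requested hwtstamp_config.
 */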
Chad Reese3d305852012-08-21 11:45:07 -0700712static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
713 struct ifreq *rq, int cmd)
714{
715 struct octeon_mgmt *p = netdev_priv(netdev);
716 struct hwtstamp_config config;
717 union cvmx_mio_ptp_clock_cfg ptp;
718 union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
719 bool have_hw_timestamps = false;
720
721 if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
722 return -EFAULT;
723
724 if (config.flags) /* reserved for future extensions */
725 return -EINVAL;
726
727 /* Check whether the hardware supports timestamps */
728 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
729 /* Get the current state of the PTP clock */
730 ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
731 if (!ptp.s.ext_clk_en) {
732 /* The clock has not been configured to use an
733 * external source. Program it to use the main clock
734 * reference.
735 */
736 u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate();
737 if (!ptp.s.ptp_en)
738 cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
739 pr_info("PTP Clock: Using sclk reference at %lld Hz\n",
740 (NSEC_PER_SEC << 32) / clock_comp);
741 } else {
742 /* The clock is already programmed to use a GPIO */
743 u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP);
744 pr_info("PTP Clock: Using GPIO %d at %lld Hz\n",
745 ptp.s.ext_clk_in,
746 (NSEC_PER_SEC << 32) / clock_comp);
747 }
748
749 /* Enable the clock if it wasn't done already */
750 if (!ptp.s.ptp_en) {
751 ptp.s.ptp_en = 1;
752 cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
753 }
754 have_hw_timestamps = true;
755 }
756
757 if (!have_hw_timestamps)
758 return -EINVAL;
759
760 switch (config.tx_type) {
761 case HWTSTAMP_TX_OFF:
762 case HWTSTAMP_TX_ON:
763 break;
764 default:
765 return -ERANGE;
766 }
767
768 switch (config.rx_filter) {
769 case HWTSTAMP_FILTER_NONE:
770 p->has_rx_tstamp = false;
771 rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
772 rxx_frm_ctl.s.ptp_mode = 0;
773 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
774 break;
775 case HWTSTAMP_FILTER_ALL:
776 case HWTSTAMP_FILTER_SOME:
777 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
778 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
779 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
780 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
781 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
782 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
783 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
784 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
785 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
786 case HWTSTAMP_FILTER_PTP_V2_EVENT:
787 case HWTSTAMP_FILTER_PTP_V2_SYNC:
788 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
789 p->has_rx_tstamp = have_hw_timestamps;
790 config.rx_filter = HWTSTAMP_FILTER_ALL;
791 if (p->has_rx_tstamp) {
792 rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
793 rxx_frm_ctl.s.ptp_mode = 1;
794 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
795 }
796 break;
797 default:
798 return -ERANGE;
799 }
800
801 if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
802 return -EFAULT;
803
804 return 0;
805}
806
David Daneyd6aa60a2009-10-14 12:04:41 -0700807static int octeon_mgmt_ioctl(struct net_device *netdev,
808 struct ifreq *rq, int cmd)
809{
810 struct octeon_mgmt *p = netdev_priv(netdev);
811
Chad Reese3d305852012-08-21 11:45:07 -0700812 switch (cmd) {
813 case SIOCSHWTSTAMP:
814 return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
815 default:
816 if (p->phydev)
817 return phy_mii_ioctl(p->phydev, rq, cmd);
David Daneyd6aa60a2009-10-14 12:04:41 -0700818 return -EINVAL;
Chad Reese3d305852012-08-21 11:45:07 -0700819 }
David Daneyd6aa60a2009-10-14 12:04:41 -0700820}
821
David Daneyeeae05a2012-08-21 11:45:06 -0700822static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
823{
824 union cvmx_agl_gmx_prtx_cfg prtx_cfg;
825
826 /* Disable GMX before we make any changes. */
827 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
828 prtx_cfg.s.en = 0;
829 prtx_cfg.s.tx_en = 0;
830 prtx_cfg.s.rx_en = 0;
831 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
832
833 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
834 int i;
835 for (i = 0; i < 10; i++) {
836 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
837 if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1)
838 break;
839 mdelay(1);
840 i++;
841 }
842 }
843}
844
845static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
846{
847 union cvmx_agl_gmx_prtx_cfg prtx_cfg;
848
849 /* Re-enable the GMX port: RX, TX and the overall port enable bit. */
850 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
851 prtx_cfg.s.tx_en = 1;
852 prtx_cfg.s.rx_en = 1;
853 prtx_cfg.s.en = 1;
854 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
855}
856
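/* Program the AGL GMX port configuration (duplex, speed, slot time, burst)
 * to match the current PHY state. Expects the port to be disabled.
 */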
857static void octeon_mgmt_update_link(struct octeon_mgmt *p)
858{
859 union cvmx_agl_gmx_prtx_cfg prtx_cfg;
860
861 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
862
863 if (!p->phydev->link)
864 prtx_cfg.s.duplex = 1;
865 else
866 prtx_cfg.s.duplex = p->phydev->duplex;
867
868 switch (p->phydev->speed) {
869 case 10:
870 prtx_cfg.s.speed = 0;
871 prtx_cfg.s.slottime = 0;
872
873 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
874 prtx_cfg.s.burst = 1;
875 prtx_cfg.s.speed_msb = 1;
876 }
877 break;
878 case 100:
879 prtx_cfg.s.speed = 0;
880 prtx_cfg.s.slottime = 0;
881
882 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
883 prtx_cfg.s.burst = 1;
884 prtx_cfg.s.speed_msb = 0;
885 }
886 break;
887 case 1000:
888 /* 1000 Mbit/s is only supported on 6XXX chips */
889 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
890 prtx_cfg.s.speed = 1;
891 prtx_cfg.s.speed_msb = 0;
892 /* Only matters for half-duplex */
893 prtx_cfg.s.slottime = 1;
894 prtx_cfg.s.burst = p->phydev->duplex;
895 }
896 break;
897 case 0: /* No link */
898 default:
899 break;
900 }
901
902 /* Write the new GMX setting with the port still disabled. */
903 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
904
905 /* Read GMX CFG again to make sure the config is completed. */
906 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
907
908 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
909 union cvmx_agl_gmx_txx_clk agl_clk;
910 union cvmx_agl_prtx_ctl prtx_ctl;
911
912 prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
913 agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
914 /* MII (both speeds) and RGMII 1000 speed. */
915 agl_clk.s.clk_cnt = 1;
916 if (prtx_ctl.s.mode == 0) { /* RGMII mode */
917 if (p->phydev->speed == 10)
918 agl_clk.s.clk_cnt = 50;
919 else if (p->phydev->speed == 100)
920 agl_clk.s.clk_cnt = 5;
921 }
922 cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
923 }
924}
925
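/* PHY library callback: reconfigure the port when the link state, speed or
 * duplex changes and log the new link state.
 */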
David Daneyd6aa60a2009-10-14 12:04:41 -0700926static void octeon_mgmt_adjust_link(struct net_device *netdev)
927{
928 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -0700929 unsigned long flags;
930 int link_changed = 0;
931
David Daneyeeae05a2012-08-21 11:45:06 -0700932 if (!p->phydev)
933 return;
934
David Daneyd6aa60a2009-10-14 12:04:41 -0700935 spin_lock_irqsave(&p->lock, flags);
David Daneyeeae05a2012-08-21 11:45:06 -0700936
937
938 if (!p->phydev->link && p->last_link)
939 link_changed = -1;
940
941 if (p->phydev->link
942 && (p->last_duplex != p->phydev->duplex
943 || p->last_link != p->phydev->link
944 || p->last_speed != p->phydev->speed)) {
945 octeon_mgmt_disable_link(p);
946 link_changed = 1;
947 octeon_mgmt_update_link(p);
948 octeon_mgmt_enable_link(p);
David Daneyd6aa60a2009-10-14 12:04:41 -0700949 }
David Daneyeeae05a2012-08-21 11:45:06 -0700950
David Daneyd6aa60a2009-10-14 12:04:41 -0700951 p->last_link = p->phydev->link;
David Daneyeeae05a2012-08-21 11:45:06 -0700952 p->last_speed = p->phydev->speed;
953 p->last_duplex = p->phydev->duplex;
954
David Daneyd6aa60a2009-10-14 12:04:41 -0700955 spin_unlock_irqrestore(&p->lock, flags);
956
957 if (link_changed != 0) {
958 if (link_changed > 0) {
David Daneyd6aa60a2009-10-14 12:04:41 -0700959 pr_info("%s: Link is up - %d/%s\n", netdev->name,
960 p->phydev->speed,
961 DUPLEX_FULL == p->phydev->duplex ?
962 "Full" : "Half");
963 } else {
David Daneyd6aa60a2009-10-14 12:04:41 -0700964 pr_info("%s: Link is down\n", netdev->name);
965 }
966 }
967}
968
969static int octeon_mgmt_init_phy(struct net_device *netdev)
970{
971 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -0700972
David Daney368bec02012-07-05 18:12:39 +0200973 if (octeon_is_simulation() || p->phy_np == NULL) {
David Daneyd6aa60a2009-10-14 12:04:41 -0700974 /* No PHYs in the simulator. */
975 netif_carrier_on(netdev);
976 return 0;
977 }
978
David Daney368bec02012-07-05 18:12:39 +0200979 p->phydev = of_phy_connect(netdev, p->phy_np,
980 octeon_mgmt_adjust_link, 0,
981 PHY_INTERFACE_MODE_MII);
David Daneyd6aa60a2009-10-14 12:04:41 -0700982
David Daneyeeae05a2012-08-21 11:45:06 -0700983 if (p->phydev == NULL)
984 return -ENODEV;
David Daneyd6aa60a2009-10-14 12:04:41 -0700985
986 return 0;
987}
988
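/* ndo_open: allocate and map the descriptor rings, bring the MIX/AGL blocks
 * out of reset, configure the interface mode and interrupts, and start NAPI.
 */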
989static int octeon_mgmt_open(struct net_device *netdev)
990{
991 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -0700992 union cvmx_mixx_ctl mix_ctl;
993 union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
994 union cvmx_mixx_oring1 oring1;
995 union cvmx_mixx_iring1 iring1;
David Daneyd6aa60a2009-10-14 12:04:41 -0700996 union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
997 union cvmx_mixx_irhwm mix_irhwm;
998 union cvmx_mixx_orhwm mix_orhwm;
999 union cvmx_mixx_intena mix_intena;
1000 struct sockaddr sa;
1001
1002 /* Allocate ring buffers. */
1003 p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1004 GFP_KERNEL);
1005 if (!p->tx_ring)
1006 return -ENOMEM;
1007 p->tx_ring_handle =
1008 dma_map_single(p->dev, p->tx_ring,
1009 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1010 DMA_BIDIRECTIONAL);
1011 p->tx_next = 0;
1012 p->tx_next_clean = 0;
1013 p->tx_current_fill = 0;
1014
1015
1016 p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
1017 GFP_KERNEL);
1018 if (!p->rx_ring)
1019 goto err_nomem;
1020 p->rx_ring_handle =
1021 dma_map_single(p->dev, p->rx_ring,
1022 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
1023 DMA_BIDIRECTIONAL);
1024
1025 p->rx_next = 0;
1026 p->rx_next_fill = 0;
1027 p->rx_current_fill = 0;
1028
1029 octeon_mgmt_reset_hw(p);
1030
David Daney368bec02012-07-05 18:12:39 +02001031 mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
David Daneyd6aa60a2009-10-14 12:04:41 -07001032
1033 /* Bring it out of reset if needed. */
1034 if (mix_ctl.s.reset) {
1035 mix_ctl.s.reset = 0;
David Daney368bec02012-07-05 18:12:39 +02001036 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -07001037 do {
David Daney368bec02012-07-05 18:12:39 +02001038 mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
David Daneyd6aa60a2009-10-14 12:04:41 -07001039 } while (mix_ctl.s.reset);
1040 }
1041
David Daneyeeae05a2012-08-21 11:45:06 -07001042 if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
1043 agl_gmx_inf_mode.u64 = 0;
1044 agl_gmx_inf_mode.s.en = 1;
1045 cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
1046 }
1047 if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
1048 || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
1049 /*
1050 * Force compensation values, as they are not
1051 * determined properly by HW
1052 */
1053 union cvmx_agl_gmx_drv_ctl drv_ctl;
1054
1055 drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
1056 if (p->port) {
1057 drv_ctl.s.byp_en1 = 1;
1058 drv_ctl.s.nctl1 = 6;
1059 drv_ctl.s.pctl1 = 6;
1060 } else {
1061 drv_ctl.s.byp_en = 1;
1062 drv_ctl.s.nctl = 6;
1063 drv_ctl.s.pctl = 6;
1064 }
1065 cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
1066 }
David Daneyd6aa60a2009-10-14 12:04:41 -07001067
1068 oring1.u64 = 0;
1069 oring1.s.obase = p->tx_ring_handle >> 3;
1070 oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
David Daney368bec02012-07-05 18:12:39 +02001071 cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -07001072
1073 iring1.u64 = 0;
1074 iring1.s.ibase = p->rx_ring_handle >> 3;
1075 iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
David Daney368bec02012-07-05 18:12:39 +02001076 cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -07001077
David Daneyd6aa60a2009-10-14 12:04:41 -07001078 memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
1079 octeon_mgmt_set_mac_address(netdev, &sa);
1080
1081 octeon_mgmt_change_mtu(netdev, netdev->mtu);
1082
1083 /*
1084 * Enable the MIX port hardware. Packet I/O stays disabled until
1085 * octeon_mgmt_enable_link() sets the GMX enable bits.
1086 */
1087 mix_ctl.u64 = 0;
1088 mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */
1089 mix_ctl.s.en = 1; /* Enable the port */
1090 mix_ctl.s.nbtarb = 0; /* Arbitration mode */
1091 /* MII CB-request FIFO programmable high watermark */
1092 mix_ctl.s.mrq_hwm = 1;
David Daneyeeae05a2012-08-21 11:45:06 -07001093#ifdef __LITTLE_ENDIAN
1094 mix_ctl.s.lendian = 1;
1095#endif
David Daney368bec02012-07-05 18:12:39 +02001096 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -07001097
David Daneyeeae05a2012-08-21 11:45:06 -07001098 /* Read the PHY to find the mode of the interface. */
1099 if (octeon_mgmt_init_phy(netdev)) {
1100 dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
1101 goto err_noirq;
1102 }
David Daneyd6aa60a2009-10-14 12:04:41 -07001103
David Daneyeeae05a2012-08-21 11:45:06 -07001104 /* Set the mode of the interface, RGMII/MII. */
1105 if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) {
1106 union cvmx_agl_prtx_ctl agl_prtx_ctl;
1107 int rgmii_mode = (p->phydev->supported &
1108 (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;
1109
1110 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1111 agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
1112 cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
1113
1114 /* MII clock counts are based on the 125 MHz
1115 * reference, which has an 8 ns period, so our delays
1116 * need to be multiplied by this factor.
1117 */
1118#define NS_PER_PHY_CLK 8
1119
1120 /* Take the DLL and clock tree out of reset */
1121 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1122 agl_prtx_ctl.s.clkrst = 0;
1123 if (rgmii_mode) {
1124 agl_prtx_ctl.s.dllrst = 0;
1125 agl_prtx_ctl.s.clktx_byp = 0;
David Daneyd6aa60a2009-10-14 12:04:41 -07001126 }
David Daneyeeae05a2012-08-21 11:45:06 -07001127 cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
1128 cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */
1129
1130 /* Wait for the DLL to lock. External 125 MHz
1131 * reference clock must be stable at this point.
1132 */
1133 ndelay(256 * NS_PER_PHY_CLK);
1134
1135 /* Enable the interface */
1136 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1137 agl_prtx_ctl.s.enable = 1;
1138 cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
1139
1140 /* Read the value back to force the previous write */
1141 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1142
1143 /* Enable the compensation controller */
1144 agl_prtx_ctl.s.comp = 1;
1145 agl_prtx_ctl.s.drv_byp = 0;
1146 cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
1147 /* Force write out before wait. */
1148 cvmx_read_csr(p->agl_prt_ctl);
1149
1150 /* Wait for the compensation state to lock. */
1151 ndelay(1040 * NS_PER_PHY_CLK);
1152
1153 /* Some Ethernet switches cannot handle the standard
1154 * Interframe Gap; increase it to 16 bytes.
1155 */
1156 cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0x88);
David Daneyd6aa60a2009-10-14 12:04:41 -07001157 }
1158
1159 octeon_mgmt_rx_fill_ring(netdev);
1160
1161 /* Clear statistics. */
1162 /* Clear on read. */
David Daney368bec02012-07-05 18:12:39 +02001163 cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
1164 cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
1165 cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);
David Daneyd6aa60a2009-10-14 12:04:41 -07001166
David Daney368bec02012-07-05 18:12:39 +02001167 cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
1168 cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
1169 cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);
David Daneyd6aa60a2009-10-14 12:04:41 -07001170
1171 /* Clear any pending interrupts */
David Daney368bec02012-07-05 18:12:39 +02001172 cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));
David Daneyd6aa60a2009-10-14 12:04:41 -07001173
1174 if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
1175 netdev)) {
1176 dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
1177 goto err_noirq;
1178 }
1179
1180 /* Interrupt every single RX packet */
1181 mix_irhwm.u64 = 0;
1182 mix_irhwm.s.irhwm = 0;
David Daney368bec02012-07-05 18:12:39 +02001183 cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -07001184
David Daneyb635e062010-05-05 13:03:11 +00001185 /* Interrupt when we have 1 or more packets to clean. */
David Daneyd6aa60a2009-10-14 12:04:41 -07001186 mix_orhwm.u64 = 0;
David Daneyeeae05a2012-08-21 11:45:06 -07001187 mix_orhwm.s.orhwm = 0;
David Daney368bec02012-07-05 18:12:39 +02001188 cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -07001189
1190 /* Enable receive and transmit interrupts */
1191 mix_intena.u64 = 0;
1192 mix_intena.s.ithena = 1;
1193 mix_intena.s.othena = 1;
David Daney368bec02012-07-05 18:12:39 +02001194 cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -07001195
David Daneyd6aa60a2009-10-14 12:04:41 -07001196 /* Enable packet I/O. */
1197
1198 rxx_frm_ctl.u64 = 0;
Chad Reese3d305852012-08-21 11:45:07 -07001199 rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
David Daneyd6aa60a2009-10-14 12:04:41 -07001200 rxx_frm_ctl.s.pre_align = 1;
1201 /*
1202 * When set, disables the length check for non-min sized pkts
1203 * with padding in the client data.
1204 */
1205 rxx_frm_ctl.s.pad_len = 1;
1206 /* When set, disables the length check for VLAN pkts */
1207 rxx_frm_ctl.s.vlan_len = 1;
1208 /* When set, PREAMBLE checking is less strict */
1209 rxx_frm_ctl.s.pre_free = 1;
1210 /* Control Pause Frames can match station SMAC */
1211 rxx_frm_ctl.s.ctl_smac = 0;
1212 /* Control Pause Frames can match the globally assigned Multicast address */
1213 rxx_frm_ctl.s.ctl_mcst = 1;
1214 /* Forward pause information to TX block */
1215 rxx_frm_ctl.s.ctl_bck = 1;
1216 /* Drop Control Pause Frames */
1217 rxx_frm_ctl.s.ctl_drp = 1;
1218 /* Strip off the preamble */
1219 rxx_frm_ctl.s.pre_strp = 1;
1220 /*
1221 * This port is configured to send PREAMBLE+SFD to begin every
1222 * frame. GMX checks that the PREAMBLE is sent correctly.
1223 */
1224 rxx_frm_ctl.s.pre_chk = 1;
David Daney368bec02012-07-05 18:12:39 +02001225 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -07001226
David Daneyeeae05a2012-08-21 11:45:06 -07001227 /* Configure the port duplex, speed and enables */
1228 octeon_mgmt_disable_link(p);
1229 if (p->phydev)
1230 octeon_mgmt_update_link(p);
1231 octeon_mgmt_enable_link(p);
David Daneyd6aa60a2009-10-14 12:04:41 -07001232
1233 p->last_link = 0;
David Daneyeeae05a2012-08-21 11:45:06 -07001234 p->last_speed = 0;
1235 /* The PHY is not present in the simulator. The carrier was enabled
1236 * while initializing the PHY for the simulator, so leave it enabled.
1237 */
1238 if (p->phydev) {
1239 netif_carrier_off(netdev);
1240 phy_start_aneg(p->phydev);
David Daneyd6aa60a2009-10-14 12:04:41 -07001241 }
1242
1243 netif_wake_queue(netdev);
1244 napi_enable(&p->napi);
1245
1246 return 0;
1247err_noirq:
1248 octeon_mgmt_reset_hw(p);
1249 dma_unmap_single(p->dev, p->rx_ring_handle,
1250 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
1251 DMA_BIDIRECTIONAL);
1252 kfree(p->rx_ring);
1253err_nomem:
1254 dma_unmap_single(p->dev, p->tx_ring_handle,
1255 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1256 DMA_BIDIRECTIONAL);
1257 kfree(p->tx_ring);
1258 return -ENOMEM;
1259}
1260
1261static int octeon_mgmt_stop(struct net_device *netdev)
1262{
1263 struct octeon_mgmt *p = netdev_priv(netdev);
1264
1265 napi_disable(&p->napi);
1266 netif_stop_queue(netdev);
1267
1268 if (p->phydev)
1269 phy_disconnect(p->phydev);
David Daneyeeae05a2012-08-21 11:45:06 -07001270 p->phydev = NULL;
David Daneyd6aa60a2009-10-14 12:04:41 -07001271
1272 netif_carrier_off(netdev);
1273
1274 octeon_mgmt_reset_hw(p);
1275
David Daneyd6aa60a2009-10-14 12:04:41 -07001276 free_irq(p->irq, netdev);
1277
1278 /* dma_unmap is a nop on Octeon, so just free everything. */
1279 skb_queue_purge(&p->tx_list);
1280 skb_queue_purge(&p->rx_list);
1281
1282 dma_unmap_single(p->dev, p->rx_ring_handle,
1283 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
1284 DMA_BIDIRECTIONAL);
1285 kfree(p->rx_ring);
1286
1287 dma_unmap_single(p->dev, p->tx_ring_handle,
1288 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1289 DMA_BIDIRECTIONAL);
1290 kfree(p->tx_ring);
1291
David Daneyd6aa60a2009-10-14 12:04:41 -07001292 return 0;
1293}
1294
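/* ndo_start_xmit: place one skb on the TX ring, request a hardware timestamp
 * if asked for, and ring the doorbell. Returns NETDEV_TX_BUSY when the ring
 * is full.
 */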
1295static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
1296{
1297 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -07001298 union mgmt_port_ring_entry re;
1299 unsigned long flags;
David Daney4e4a4f12010-05-05 13:03:12 +00001300 int rv = NETDEV_TX_BUSY;
David Daneyd6aa60a2009-10-14 12:04:41 -07001301
1302 re.d64 = 0;
Chad Reese3d305852012-08-21 11:45:07 -07001303 re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
David Daneyd6aa60a2009-10-14 12:04:41 -07001304 re.s.len = skb->len;
1305 re.s.addr = dma_map_single(p->dev, skb->data,
1306 skb->len,
1307 DMA_TO_DEVICE);
1308
1309 spin_lock_irqsave(&p->tx_list.lock, flags);
1310
David Daney4e4a4f12010-05-05 13:03:12 +00001311 if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
1312 spin_unlock_irqrestore(&p->tx_list.lock, flags);
1313 netif_stop_queue(netdev);
1314 spin_lock_irqsave(&p->tx_list.lock, flags);
1315 }
1316
David Daneyd6aa60a2009-10-14 12:04:41 -07001317 if (unlikely(p->tx_current_fill >=
1318 ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
1319 spin_unlock_irqrestore(&p->tx_list.lock, flags);
David Daneyd6aa60a2009-10-14 12:04:41 -07001320 dma_unmap_single(p->dev, re.s.addr, re.s.len,
1321 DMA_TO_DEVICE);
David Daney4e4a4f12010-05-05 13:03:12 +00001322 goto out;
David Daneyd6aa60a2009-10-14 12:04:41 -07001323 }
1324
1325 __skb_queue_tail(&p->tx_list, skb);
1326
1327 /* Put it in the ring. */
1328 p->tx_ring[p->tx_next] = re.d64;
1329 p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
1330 p->tx_current_fill++;
1331
1332 spin_unlock_irqrestore(&p->tx_list.lock, flags);
1333
1334 dma_sync_single_for_device(p->dev, p->tx_ring_handle,
1335 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1336 DMA_BIDIRECTIONAL);
1337
1338 netdev->stats.tx_packets++;
1339 netdev->stats.tx_bytes += skb->len;
1340
1341 /* Ring the bell. */
David Daney368bec02012-07-05 18:12:39 +02001342 cvmx_write_csr(p->mix + MIX_ORING2, 1);
David Daneyd6aa60a2009-10-14 12:04:41 -07001343
David Daneyeeae05a2012-08-21 11:45:06 -07001344 netdev->trans_start = jiffies;
David Daney4e4a4f12010-05-05 13:03:12 +00001345 rv = NETDEV_TX_OK;
1346out:
David Daneyd6aa60a2009-10-14 12:04:41 -07001347 octeon_mgmt_update_tx_stats(netdev);
David Daney4e4a4f12010-05-05 13:03:12 +00001348 return rv;
David Daneyd6aa60a2009-10-14 12:04:41 -07001349}
1350
1351#ifdef CONFIG_NET_POLL_CONTROLLER
1352static void octeon_mgmt_poll_controller(struct net_device *netdev)
1353{
1354 struct octeon_mgmt *p = netdev_priv(netdev);
1355
1356 octeon_mgmt_receive_packets(p, 16);
1357 octeon_mgmt_update_rx_stats(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -07001358}
1359#endif
1360
1361static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
1362 struct ethtool_drvinfo *info)
1363{
1364 strncpy(info->driver, DRV_NAME, sizeof(info->driver));
1365 strncpy(info->version, DRV_VERSION, sizeof(info->version));
1366 strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
1367 strncpy(info->bus_info, "N/A", sizeof(info->bus_info));
1368 info->n_stats = 0;
1369 info->testinfo_len = 0;
1370 info->regdump_len = 0;
1371 info->eedump_len = 0;
1372}
1373
1374static int octeon_mgmt_get_settings(struct net_device *netdev,
1375 struct ethtool_cmd *cmd)
1376{
1377 struct octeon_mgmt *p = netdev_priv(netdev);
1378
1379 if (p->phydev)
1380 return phy_ethtool_gset(p->phydev, cmd);
1381
1382 return -EINVAL;
1383}
1384
1385static int octeon_mgmt_set_settings(struct net_device *netdev,
1386 struct ethtool_cmd *cmd)
1387{
1388 struct octeon_mgmt *p = netdev_priv(netdev);
1389
1390 if (!capable(CAP_NET_ADMIN))
1391 return -EPERM;
1392
1393 if (p->phydev)
1394 return phy_ethtool_sset(p->phydev, cmd);
1395
1396 return -EINVAL;
1397}
1398
1399static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
1400 .get_drvinfo = octeon_mgmt_get_drvinfo,
1401 .get_link = ethtool_op_get_link,
1402 .get_settings = octeon_mgmt_get_settings,
1403 .set_settings = octeon_mgmt_set_settings
1404};
1405
1406static const struct net_device_ops octeon_mgmt_ops = {
1407 .ndo_open = octeon_mgmt_open,
1408 .ndo_stop = octeon_mgmt_stop,
1409 .ndo_start_xmit = octeon_mgmt_xmit,
David Daneyeeae05a2012-08-21 11:45:06 -07001410 .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
David Daneyd6aa60a2009-10-14 12:04:41 -07001411 .ndo_set_mac_address = octeon_mgmt_set_mac_address,
David Daneyeeae05a2012-08-21 11:45:06 -07001412 .ndo_do_ioctl = octeon_mgmt_ioctl,
David Daneyd6aa60a2009-10-14 12:04:41 -07001413 .ndo_change_mtu = octeon_mgmt_change_mtu,
1414#ifdef CONFIG_NET_POLL_CONTROLLER
1415 .ndo_poll_controller = octeon_mgmt_poll_controller,
1416#endif
1417};
1418
David Daneyd30b1812010-06-24 09:14:47 +00001419static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
David Daneyd6aa60a2009-10-14 12:04:41 -07001420{
David Daneyd6aa60a2009-10-14 12:04:41 -07001421 struct net_device *netdev;
1422 struct octeon_mgmt *p;
David Daney368bec02012-07-05 18:12:39 +02001423 const __be32 *data;
1424 const u8 *mac;
1425 struct resource *res_mix;
1426 struct resource *res_agl;
David Daneyeeae05a2012-08-21 11:45:06 -07001427 struct resource *res_agl_prt_ctl;
David Daney368bec02012-07-05 18:12:39 +02001428 int len;
1429 int result;
David Daneyd6aa60a2009-10-14 12:04:41 -07001430
1431 netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
1432 if (netdev == NULL)
1433 return -ENOMEM;
1434
1435 dev_set_drvdata(&pdev->dev, netdev);
1436 p = netdev_priv(netdev);
1437 netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
1438 OCTEON_MGMT_NAPI_WEIGHT);
1439
1440 p->netdev = netdev;
1441 p->dev = &pdev->dev;
Chad Reese3d305852012-08-21 11:45:07 -07001442 p->has_rx_tstamp = false;
David Daneyd6aa60a2009-10-14 12:04:41 -07001443
David Daney368bec02012-07-05 18:12:39 +02001444 data = of_get_property(pdev->dev.of_node, "cell-index", &len);
1445 if (data && len == sizeof(*data)) {
1446 p->port = be32_to_cpup(data);
1447 } else {
1448 dev_err(&pdev->dev, "no 'cell-index' property\n");
1449 result = -ENXIO;
1450 goto err;
1451 }
1452
David Daneyd6aa60a2009-10-14 12:04:41 -07001453 snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);
1454
David Daney368bec02012-07-05 18:12:39 +02001455 result = platform_get_irq(pdev, 0);
1456 if (result < 0)
David Daneyd6aa60a2009-10-14 12:04:41 -07001457 goto err;
1458
David Daney368bec02012-07-05 18:12:39 +02001459 p->irq = result;
1460
1461 res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1462 if (res_mix == NULL) {
1463 dev_err(&pdev->dev, "no 'reg' resource\n");
1464 result = -ENXIO;
1465 goto err;
1466 }
1467
1468 res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1469 if (res_agl == NULL) {
1470 dev_err(&pdev->dev, "no 'reg' resource\n");
1471 result = -ENXIO;
1472 goto err;
1473 }
1474
David Daneyeeae05a2012-08-21 11:45:06 -07001475 res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
1476 if (res_agl_prt_ctl == NULL) {
1477 dev_err(&pdev->dev, "no 'reg' resource\n");
1478 result = -ENXIO;
1479 goto err;
1480 }
1481
David Daney368bec02012-07-05 18:12:39 +02001482 p->mix_phys = res_mix->start;
1483 p->mix_size = resource_size(res_mix);
1484 p->agl_phys = res_agl->start;
1485 p->agl_size = resource_size(res_agl);
David Daneyeeae05a2012-08-21 11:45:06 -07001486 p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
1487 p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);
David Daney368bec02012-07-05 18:12:39 +02001488
1489
1490 if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
1491 res_mix->name)) {
1492 dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1493 res_mix->name);
1494 result = -ENXIO;
1495 goto err;
1496 }
1497
1498 if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
1499 res_agl->name)) {
1500 result = -ENXIO;
1501 dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1502 res_agl->name);
1503 goto err;
1504 }
1505
David Daneyeeae05a2012-08-21 11:45:06 -07001506 if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
1507 p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
1508 result = -ENXIO;
1509 dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1510 res_agl_prt_ctl->name);
1511 goto err;
1512 }
David Daney368bec02012-07-05 18:12:39 +02001513
1514 p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
1515 p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
David Daneyeeae05a2012-08-21 11:45:06 -07001516 p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
1517 p->agl_prt_ctl_size);
David Daneyd6aa60a2009-10-14 12:04:41 -07001518 spin_lock_init(&p->lock);
1519
1520 skb_queue_head_init(&p->tx_list);
1521 skb_queue_head_init(&p->rx_list);
1522 tasklet_init(&p->tx_clean_tasklet,
1523 octeon_mgmt_clean_tx_tasklet, (unsigned long)p);
1524
Jiri Pirko01789342011-08-16 06:29:00 +00001525 netdev->priv_flags |= IFF_UNICAST_FLT;
1526
David Daneyd6aa60a2009-10-14 12:04:41 -07001527 netdev->netdev_ops = &octeon_mgmt_ops;
1528 netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
1529
David Daney368bec02012-07-05 18:12:39 +02001530 mac = of_get_mac_address(pdev->dev.of_node);
David Daneyd6aa60a2009-10-14 12:04:41 -07001531
David Daney368bec02012-07-05 18:12:39 +02001532 if (mac)
1533 memcpy(netdev->dev_addr, mac, 6);
David Daneyd6aa60a2009-10-14 12:04:41 -07001534
David Daney368bec02012-07-05 18:12:39 +02001535 p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
1536
1537 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
1538 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
1539
David Daneyeeae05a2012-08-21 11:45:06 -07001540 netif_carrier_off(netdev);
David Daney368bec02012-07-05 18:12:39 +02001541 result = register_netdev(netdev);
1542 if (result)
David Daneyd6aa60a2009-10-14 12:04:41 -07001543 goto err;
1544
1545 dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
1546 return 0;
David Daney368bec02012-07-05 18:12:39 +02001547
David Daneyd6aa60a2009-10-14 12:04:41 -07001548err:
1549 free_netdev(netdev);
David Daney368bec02012-07-05 18:12:39 +02001550 return result;
David Daneyd6aa60a2009-10-14 12:04:41 -07001551}
1552
David Daneyd30b1812010-06-24 09:14:47 +00001553static int __devexit octeon_mgmt_remove(struct platform_device *pdev)
David Daneyd6aa60a2009-10-14 12:04:41 -07001554{
1555 struct net_device *netdev = dev_get_drvdata(&pdev->dev);
1556
1557 unregister_netdev(netdev);
1558 free_netdev(netdev);
1559 return 0;
1560}
1561
David Daney368bec02012-07-05 18:12:39 +02001562static struct of_device_id octeon_mgmt_match[] = {
1563 {
1564 .compatible = "cavium,octeon-5750-mix",
1565 },
1566 {},
1567};
1568MODULE_DEVICE_TABLE(of, octeon_mgmt_match);
1569
David Daneyd6aa60a2009-10-14 12:04:41 -07001570static struct platform_driver octeon_mgmt_driver = {
1571 .driver = {
1572 .name = "octeon_mgmt",
1573 .owner = THIS_MODULE,
David Daney368bec02012-07-05 18:12:39 +02001574 .of_match_table = octeon_mgmt_match,
David Daneyd6aa60a2009-10-14 12:04:41 -07001575 },
1576 .probe = octeon_mgmt_probe,
David Daneyd30b1812010-06-24 09:14:47 +00001577 .remove = __devexit_p(octeon_mgmt_remove),
David Daneyd6aa60a2009-10-14 12:04:41 -07001578};
1579
1580extern void octeon_mdiobus_force_mod_depencency(void);
1581
1582static int __init octeon_mgmt_mod_init(void)
1583{
1584 /* Force our mdiobus driver module to be loaded first. */
1585 octeon_mdiobus_force_mod_depencency();
1586 return platform_driver_register(&octeon_mgmt_driver);
1587}
1588
1589static void __exit octeon_mgmt_mod_exit(void)
1590{
1591 platform_driver_unregister(&octeon_mgmt_driver);
1592}
1593
1594module_init(octeon_mgmt_mod_init);
1595module_exit(octeon_mgmt_mod_exit);
1596
1597MODULE_DESCRIPTION(DRV_DESCRIPTION);
1598MODULE_AUTHOR("David Daney");
1599MODULE_LICENSE("GPL");
1600MODULE_VERSION(DRV_VERSION);