David Daneyd6aa60a2009-10-14 12:04:41 -07001/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
David Daneyeeae05a2012-08-21 11:45:06 -07006 * Copyright (C) 2009-2012 Cavium, Inc
David Daneyd6aa60a2009-10-14 12:04:41 -07007 */
8
David Daneyd6aa60a2009-10-14 12:04:41 -07009#include <linux/platform_device.h>
David Daney368bec02012-07-05 18:12:39 +020010#include <linux/dma-mapping.h>
David Daneyd6aa60a2009-10-14 12:04:41 -070011#include <linux/etherdevice.h>
David Daney368bec02012-07-05 18:12:39 +020012#include <linux/capability.h>
Chad Reese3d305852012-08-21 11:45:07 -070013#include <linux/net_tstamp.h>
David Daney368bec02012-07-05 18:12:39 +020014#include <linux/interrupt.h>
15#include <linux/netdevice.h>
16#include <linux/spinlock.h>
David Daneyd6aa60a2009-10-14 12:04:41 -070017#include <linux/if_vlan.h>
David Daney368bec02012-07-05 18:12:39 +020018#include <linux/of_mdio.h>
19#include <linux/module.h>
20#include <linux/of_net.h>
21#include <linux/init.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090022#include <linux/slab.h>
David Daneyd6aa60a2009-10-14 12:04:41 -070023#include <linux/phy.h>
David Daney368bec02012-07-05 18:12:39 +020024#include <linux/io.h>
David Daneyd6aa60a2009-10-14 12:04:41 -070025
26#include <asm/octeon/octeon.h>
27#include <asm/octeon/cvmx-mixx-defs.h>
28#include <asm/octeon/cvmx-agl-defs.h>
29
30#define DRV_NAME "octeon_mgmt"
31#define DRV_VERSION "2.0"
32#define DRV_DESCRIPTION \
33 "Cavium Networks Octeon MII (management) port Network Driver"
34
35#define OCTEON_MGMT_NAPI_WEIGHT 16
36
David Daneya0ce9b12012-08-21 11:45:12 -070037/* Ring sizes that are powers of two allow for more efficient modulo
David Daneyd6aa60a2009-10-14 12:04:41 -070038 * operations.
39 */
40#define OCTEON_MGMT_RX_RING_SIZE 512
41#define OCTEON_MGMT_TX_RING_SIZE 128
42
43/* Allow 8 extra bytes for the VLAN tag and FCS beyond the Ethernet header. */
44#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
45
46union mgmt_port_ring_entry {
47 u64 d64;
48 struct {
David Daneyd6aa60a2009-10-14 12:04:41 -070049#define RING_ENTRY_CODE_DONE 0xf
50#define RING_ENTRY_CODE_MORE 0x10
David Daney3ac19c92013-06-19 17:40:20 -070051#ifdef __BIG_ENDIAN_BITFIELD
52 u64 reserved_62_63:2;
53 /* Length of the buffer/packet in bytes */
54 u64 len:14;
55 /* For TX, signals that the packet should be timestamped */
56 u64 tstamp:1;
57 /* The RX error code */
58 u64 code:7;
David Daneyd6aa60a2009-10-14 12:04:41 -070059 /* Physical address of the buffer */
David Daney3ac19c92013-06-19 17:40:20 -070060 u64 addr:40;
61#else
62 u64 addr:40;
63 u64 code:7;
64 u64 tstamp:1;
65 u64 len:14;
66 u64 reserved_62_63:2;
67#endif
David Daneyd6aa60a2009-10-14 12:04:41 -070068 } s;
69};
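/* A descriptor is built by zeroing d64 and then filling the individual
 * fields, as the RX refill and TX paths below do, e.g.:
 *
 *	re.d64 = 0;
 *	re.s.len = size;
 *	re.s.addr = dma_map_single(p->dev, skb->data, size, DMA_FROM_DEVICE);
 *
 * The union lets the whole 64-bit entry be copied into the ring with a
 * single store.
 */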
70
David Daney368bec02012-07-05 18:12:39 +020071#define MIX_ORING1 0x0
72#define MIX_ORING2 0x8
73#define MIX_IRING1 0x10
74#define MIX_IRING2 0x18
75#define MIX_CTL 0x20
76#define MIX_IRHWM 0x28
77#define MIX_IRCNT 0x30
78#define MIX_ORHWM 0x38
79#define MIX_ORCNT 0x40
80#define MIX_ISR 0x48
81#define MIX_INTENA 0x50
82#define MIX_REMCNT 0x58
83#define MIX_BIST 0x78
84
85#define AGL_GMX_PRT_CFG 0x10
86#define AGL_GMX_RX_FRM_CTL 0x18
87#define AGL_GMX_RX_FRM_MAX 0x30
88#define AGL_GMX_RX_JABBER 0x38
89#define AGL_GMX_RX_STATS_CTL 0x50
90
91#define AGL_GMX_RX_STATS_PKTS_DRP 0xb0
92#define AGL_GMX_RX_STATS_OCTS_DRP 0xb8
93#define AGL_GMX_RX_STATS_PKTS_BAD 0xc0
94
95#define AGL_GMX_RX_ADR_CTL 0x100
96#define AGL_GMX_RX_ADR_CAM_EN 0x108
97#define AGL_GMX_RX_ADR_CAM0 0x180
98#define AGL_GMX_RX_ADR_CAM1 0x188
99#define AGL_GMX_RX_ADR_CAM2 0x190
100#define AGL_GMX_RX_ADR_CAM3 0x198
101#define AGL_GMX_RX_ADR_CAM4 0x1a0
102#define AGL_GMX_RX_ADR_CAM5 0x1a8
103
David Daneyeeae05a2012-08-21 11:45:06 -0700104#define AGL_GMX_TX_CLK 0x208
David Daney368bec02012-07-05 18:12:39 +0200105#define AGL_GMX_TX_STATS_CTL 0x268
106#define AGL_GMX_TX_CTL 0x270
107#define AGL_GMX_TX_STAT0 0x280
108#define AGL_GMX_TX_STAT1 0x288
109#define AGL_GMX_TX_STAT2 0x290
110#define AGL_GMX_TX_STAT3 0x298
111#define AGL_GMX_TX_STAT4 0x2a0
112#define AGL_GMX_TX_STAT5 0x2a8
113#define AGL_GMX_TX_STAT6 0x2b0
114#define AGL_GMX_TX_STAT7 0x2b8
115#define AGL_GMX_TX_STAT8 0x2c0
116#define AGL_GMX_TX_STAT9 0x2c8
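/* The offsets above are byte offsets into the ioremapped MIX and AGL
 * windows kept in the mix and agl members of struct octeon_mgmt below;
 * they are always accessed as 64-bit CSRs, e.g.:
 *
 *	mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
 *	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
 */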
117
David Daneyd6aa60a2009-10-14 12:04:41 -0700118struct octeon_mgmt {
119 struct net_device *netdev;
David Daney368bec02012-07-05 18:12:39 +0200120 u64 mix;
121 u64 agl;
David Daneyeeae05a2012-08-21 11:45:06 -0700122 u64 agl_prt_ctl;
David Daneyd6aa60a2009-10-14 12:04:41 -0700123 int port;
124 int irq;
Chad Reese3d305852012-08-21 11:45:07 -0700125 bool has_rx_tstamp;
David Daneyd6aa60a2009-10-14 12:04:41 -0700126 u64 *tx_ring;
127 dma_addr_t tx_ring_handle;
128 unsigned int tx_next;
129 unsigned int tx_next_clean;
130 unsigned int tx_current_fill;
131 /* The tx_list lock also protects the ring related variables */
132 struct sk_buff_head tx_list;
133
134 /* RX variables only touched in napi_poll. No locking necessary. */
135 u64 *rx_ring;
136 dma_addr_t rx_ring_handle;
137 unsigned int rx_next;
138 unsigned int rx_next_fill;
139 unsigned int rx_current_fill;
140 struct sk_buff_head rx_list;
141
142 spinlock_t lock;
143 unsigned int last_duplex;
144 unsigned int last_link;
David Daneyeeae05a2012-08-21 11:45:06 -0700145 unsigned int last_speed;
David Daneyd6aa60a2009-10-14 12:04:41 -0700146 struct device *dev;
147 struct napi_struct napi;
148 struct tasklet_struct tx_clean_tasklet;
149 struct phy_device *phydev;
David Daney368bec02012-07-05 18:12:39 +0200150 struct device_node *phy_np;
151 resource_size_t mix_phys;
152 resource_size_t mix_size;
153 resource_size_t agl_phys;
154 resource_size_t agl_size;
David Daneyeeae05a2012-08-21 11:45:06 -0700155 resource_size_t agl_prt_ctl_phys;
156 resource_size_t agl_prt_ctl_size;
David Daneyd6aa60a2009-10-14 12:04:41 -0700157};
158
159static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
160{
David Daneyd6aa60a2009-10-14 12:04:41 -0700161 union cvmx_mixx_intena mix_intena;
162 unsigned long flags;
163
164 spin_lock_irqsave(&p->lock, flags);
David Daney368bec02012-07-05 18:12:39 +0200165 mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
David Daneyd6aa60a2009-10-14 12:04:41 -0700166 mix_intena.s.ithena = enable ? 1 : 0;
David Daney368bec02012-07-05 18:12:39 +0200167 cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -0700168 spin_unlock_irqrestore(&p->lock, flags);
169}
170
171static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
172{
David Daneyd6aa60a2009-10-14 12:04:41 -0700173 union cvmx_mixx_intena mix_intena;
174 unsigned long flags;
175
176 spin_lock_irqsave(&p->lock, flags);
David Daney368bec02012-07-05 18:12:39 +0200177 mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
David Daneyd6aa60a2009-10-14 12:04:41 -0700178 mix_intena.s.othena = enable ? 1 : 0;
David Daney368bec02012-07-05 18:12:39 +0200179 cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -0700180 spin_unlock_irqrestore(&p->lock, flags);
181}
182
David Daneye96f7512012-08-21 11:45:11 -0700183static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
David Daneyd6aa60a2009-10-14 12:04:41 -0700184{
185 octeon_mgmt_set_rx_irq(p, 1);
186}
187
David Daneye96f7512012-08-21 11:45:11 -0700188static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
David Daneyd6aa60a2009-10-14 12:04:41 -0700189{
190 octeon_mgmt_set_rx_irq(p, 0);
191}
192
David Daneye96f7512012-08-21 11:45:11 -0700193static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
David Daneyd6aa60a2009-10-14 12:04:41 -0700194{
195 octeon_mgmt_set_tx_irq(p, 1);
196}
197
David Daneye96f7512012-08-21 11:45:11 -0700198static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
David Daneyd6aa60a2009-10-14 12:04:41 -0700199{
200 octeon_mgmt_set_tx_irq(p, 0);
201}
202
203static unsigned int ring_max_fill(unsigned int ring_size)
204{
205 return ring_size - 8;
206}
207
208static unsigned int ring_size_to_bytes(unsigned int ring_size)
209{
210 return ring_size * sizeof(union mgmt_port_ring_entry);
211}
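/* For example, with OCTEON_MGMT_RX_RING_SIZE = 512 and 8-byte ring
 * entries, ring_size_to_bytes() returns 4096 and ring_max_fill()
 * returns 504; the eight-entry slack keeps the software fill level a
 * little below the hardware ring size.
 */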
212
213static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
214{
215 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -0700216
217 while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
218 unsigned int size;
219 union mgmt_port_ring_entry re;
220 struct sk_buff *skb;
221
222 /* CN56XX pass 1 needs 8 bytes of padding. */
223 size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;
224
225 skb = netdev_alloc_skb(netdev, size);
226 if (!skb)
227 break;
228 skb_reserve(skb, NET_IP_ALIGN);
229 __skb_queue_tail(&p->rx_list, skb);
230
231 re.d64 = 0;
232 re.s.len = size;
233 re.s.addr = dma_map_single(p->dev, skb->data,
234 size,
235 DMA_FROM_DEVICE);
236
237 /* Put it in the ring. */
238 p->rx_ring[p->rx_next_fill] = re.d64;
239 dma_sync_single_for_device(p->dev, p->rx_ring_handle,
240 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
241 DMA_BIDIRECTIONAL);
242 p->rx_next_fill =
243 (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
244 p->rx_current_fill++;
245 /* Ring the bell. */
David Daney368bec02012-07-05 18:12:39 +0200246 cvmx_write_csr(p->mix + MIX_IRING2, 1);
David Daneyd6aa60a2009-10-14 12:04:41 -0700247 }
248}
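/* Each write of 1 to MIX_IRING2 in the loop above is a doorbell telling
 * the hardware that one more RX descriptor is valid; MIX_ORING2 plays
 * the same role for the TX ring in octeon_mgmt_xmit().
 */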
249
250static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
251{
David Daneyd6aa60a2009-10-14 12:04:41 -0700252 union cvmx_mixx_orcnt mix_orcnt;
253 union mgmt_port_ring_entry re;
254 struct sk_buff *skb;
255 int cleaned = 0;
256 unsigned long flags;
257
David Daney368bec02012-07-05 18:12:39 +0200258 mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
David Daneyd6aa60a2009-10-14 12:04:41 -0700259 while (mix_orcnt.s.orcnt) {
David Daney4d30b802010-05-05 13:03:09 +0000260 spin_lock_irqsave(&p->tx_list.lock, flags);
261
David Daney368bec02012-07-05 18:12:39 +0200262 mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
David Daney4d30b802010-05-05 13:03:09 +0000263
264 if (mix_orcnt.s.orcnt == 0) {
265 spin_unlock_irqrestore(&p->tx_list.lock, flags);
266 break;
267 }
268
David Daneyd6aa60a2009-10-14 12:04:41 -0700269 dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
270 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
271 DMA_BIDIRECTIONAL);
272
David Daneyd6aa60a2009-10-14 12:04:41 -0700273 re.d64 = p->tx_ring[p->tx_next_clean];
274 p->tx_next_clean =
275 (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
276 skb = __skb_dequeue(&p->tx_list);
277
278 mix_orcnt.u64 = 0;
279 mix_orcnt.s.orcnt = 1;
280
281 /* Acknowledge to hardware that we have the buffer. */
David Daney368bec02012-07-05 18:12:39 +0200282 cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -0700283 p->tx_current_fill--;
284
285 spin_unlock_irqrestore(&p->tx_list.lock, flags);
286
287 dma_unmap_single(p->dev, re.s.addr, re.s.len,
288 DMA_TO_DEVICE);
Chad Reese3d305852012-08-21 11:45:07 -0700289
290 /* Read the hardware TX timestamp if one was recorded */
291 if (unlikely(re.s.tstamp)) {
292 struct skb_shared_hwtstamps ts;
Willem de Bruijnc6d5fef2014-07-25 18:01:29 -0400293 memset(&ts, 0, sizeof(ts));
Chad Reese3d305852012-08-21 11:45:07 -0700294 /* Read the timestamp */
295 u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
296 /* Remove the timestamp from the FIFO */
297 cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
298 /* Tell the kernel about the timestamp */
Chad Reese3d305852012-08-21 11:45:07 -0700299 ts.hwtstamp = ns_to_ktime(ns);
300 skb_tstamp_tx(skb, &ts);
301 }
302
David Daneyd6aa60a2009-10-14 12:04:41 -0700303 dev_kfree_skb_any(skb);
304 cleaned++;
305
David Daney368bec02012-07-05 18:12:39 +0200306 mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
David Daneyd6aa60a2009-10-14 12:04:41 -0700307 }
308
309 if (cleaned && netif_queue_stopped(p->netdev))
310 netif_wake_queue(p->netdev);
311}
312
313static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
314{
315 struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
316 octeon_mgmt_clean_tx_buffers(p);
317 octeon_mgmt_enable_tx_irq(p);
318}
319
320static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
321{
322 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -0700323 unsigned long flags;
324 u64 drop, bad;
325
326 /* These reads also clear the count registers. */
David Daney368bec02012-07-05 18:12:39 +0200327 drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
328 bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);
David Daneyd6aa60a2009-10-14 12:04:41 -0700329
330 if (drop || bad) {
331 /* Do an atomic update. */
332 spin_lock_irqsave(&p->lock, flags);
333 netdev->stats.rx_errors += bad;
334 netdev->stats.rx_dropped += drop;
335 spin_unlock_irqrestore(&p->lock, flags);
336 }
337}
338
339static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
340{
341 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -0700342 unsigned long flags;
343
344 union cvmx_agl_gmx_txx_stat0 s0;
345 union cvmx_agl_gmx_txx_stat1 s1;
346
347 /* These reads also clear the count registers. */
David Daney368bec02012-07-05 18:12:39 +0200348 s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0);
349 s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1);
David Daneyd6aa60a2009-10-14 12:04:41 -0700350
351 if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
352 /* Do an atomic update. */
353 spin_lock_irqsave(&p->lock, flags);
354 netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
355 netdev->stats.collisions += s1.s.scol + s1.s.mcol;
356 spin_unlock_irqrestore(&p->lock, flags);
357 }
358}
359
360/*
361 * Dequeue a receive skb and its corresponding ring entry. The ring
362 * entry is returned, *pskb is updated to point to the skb.
363 */
364static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
365 struct sk_buff **pskb)
366{
367 union mgmt_port_ring_entry re;
368
369 dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
370 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
371 DMA_BIDIRECTIONAL);
372
373 re.d64 = p->rx_ring[p->rx_next];
374 p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
375 p->rx_current_fill--;
376 *pskb = __skb_dequeue(&p->rx_list);
377
378 dma_unmap_single(p->dev, re.s.addr,
379 ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
380 DMA_FROM_DEVICE);
381
382 return re.d64;
383}
384
385
386static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
387{
David Daneyd6aa60a2009-10-14 12:04:41 -0700388 struct net_device *netdev = p->netdev;
389 union cvmx_mixx_ircnt mix_ircnt;
390 union mgmt_port_ring_entry re;
391 struct sk_buff *skb;
392 struct sk_buff *skb2;
393 struct sk_buff *skb_new;
394 union mgmt_port_ring_entry re2;
395 int rc = 1;
396
397
398 re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
399 if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
400 /* A good packet, send it up. */
401 skb_put(skb, re.s.len);
402good:
Chad Reese3d305852012-08-21 11:45:07 -0700403 /* Process the RX timestamp if it was recorded */
404 if (p->has_rx_tstamp) {
405 /* The first 8 bytes are the timestamp */
406 u64 ns = *(u64 *)skb->data;
407 struct skb_shared_hwtstamps *ts;
408 ts = skb_hwtstamps(skb);
409 ts->hwtstamp = ns_to_ktime(ns);
Chad Reese3d305852012-08-21 11:45:07 -0700410 __skb_pull(skb, 8);
411 }
David Daneyd6aa60a2009-10-14 12:04:41 -0700412 skb->protocol = eth_type_trans(skb, netdev);
413 netdev->stats.rx_packets++;
414 netdev->stats.rx_bytes += skb->len;
David Daneyd6aa60a2009-10-14 12:04:41 -0700415 netif_receive_skb(skb);
416 rc = 0;
417 } else if (re.s.code == RING_ENTRY_CODE_MORE) {
David Daneya0ce9b12012-08-21 11:45:12 -0700418 /* Packet split across skbs. This can happen if we
David Daneyd6aa60a2009-10-14 12:04:41 -0700419 * increase the MTU. Buffers that are already in the
420 * rx ring can then end up being too small. As the rx
421 * ring is refilled, buffers sized for the new MTU
422 * will be used and we should go back to the normal
423 * non-split case.
424 */
425 skb_put(skb, re.s.len);
426 do {
427 re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
428 if (re2.s.code != RING_ENTRY_CODE_MORE
429 && re2.s.code != RING_ENTRY_CODE_DONE)
430 goto split_error;
431 skb_put(skb2, re2.s.len);
432 skb_new = skb_copy_expand(skb, 0, skb2->len,
433 GFP_ATOMIC);
434 if (!skb_new)
435 goto split_error;
436 if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
437 skb2->len))
438 goto split_error;
439 skb_put(skb_new, skb2->len);
440 dev_kfree_skb_any(skb);
441 dev_kfree_skb_any(skb2);
442 skb = skb_new;
443 } while (re2.s.code == RING_ENTRY_CODE_MORE);
444 goto good;
445 } else {
446 /* Some other error, discard it. */
447 dev_kfree_skb_any(skb);
David Daneya0ce9b12012-08-21 11:45:12 -0700448 /* Error statistics are accumulated in
David Daneyd6aa60a2009-10-14 12:04:41 -0700449 * octeon_mgmt_update_rx_stats.
450 */
451 }
452 goto done;
453split_error:
454 /* Discard the whole mess. */
455 dev_kfree_skb_any(skb);
456 dev_kfree_skb_any(skb2);
457 while (re2.s.code == RING_ENTRY_CODE_MORE) {
458 re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
459 dev_kfree_skb_any(skb2);
460 }
461 netdev->stats.rx_errors++;
462
463done:
464 /* Tell the hardware we processed a packet. */
465 mix_ircnt.u64 = 0;
466 mix_ircnt.s.ircnt = 1;
David Daney368bec02012-07-05 18:12:39 +0200467 cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -0700468 return rc;
David Daneyd6aa60a2009-10-14 12:04:41 -0700469}
470
471static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
472{
David Daneyd6aa60a2009-10-14 12:04:41 -0700473 unsigned int work_done = 0;
474 union cvmx_mixx_ircnt mix_ircnt;
475 int rc;
476
David Daney368bec02012-07-05 18:12:39 +0200477 mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
David Daneyd6aa60a2009-10-14 12:04:41 -0700478 while (work_done < budget && mix_ircnt.s.ircnt) {
479
480 rc = octeon_mgmt_receive_one(p);
481 if (!rc)
482 work_done++;
483
484 /* Check for more packets. */
David Daney368bec02012-07-05 18:12:39 +0200485 mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
David Daneyd6aa60a2009-10-14 12:04:41 -0700486 }
487
488 octeon_mgmt_rx_fill_ring(p->netdev);
489
490 return work_done;
491}
492
493static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
494{
495 struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
496 struct net_device *netdev = p->netdev;
497 unsigned int work_done = 0;
498
499 work_done = octeon_mgmt_receive_packets(p, budget);
500
501 if (work_done < budget) {
502 /* We stopped because no more packets were available. */
503 napi_complete(napi);
504 octeon_mgmt_enable_rx_irq(p);
505 }
506 octeon_mgmt_update_rx_stats(netdev);
507
508 return work_done;
509}
510
511/* Reset the hardware to clean state. */
512static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
513{
514 union cvmx_mixx_ctl mix_ctl;
515 union cvmx_mixx_bist mix_bist;
516 union cvmx_agl_gmx_bist agl_gmx_bist;
517
518 mix_ctl.u64 = 0;
David Daney368bec02012-07-05 18:12:39 +0200519 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -0700520 do {
David Daney368bec02012-07-05 18:12:39 +0200521 mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
David Daneyd6aa60a2009-10-14 12:04:41 -0700522 } while (mix_ctl.s.busy);
523 mix_ctl.s.reset = 1;
David Daney368bec02012-07-05 18:12:39 +0200524 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
525 cvmx_read_csr(p->mix + MIX_CTL);
David Daneyeeae05a2012-08-21 11:45:06 -0700526 octeon_io_clk_delay(64);
David Daneyd6aa60a2009-10-14 12:04:41 -0700527
David Daney368bec02012-07-05 18:12:39 +0200528 mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
David Daneyd6aa60a2009-10-14 12:04:41 -0700529 if (mix_bist.u64)
530 dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
531 (unsigned long long)mix_bist.u64);
532
533 agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
534 if (agl_gmx_bist.u64)
535 dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
536 (unsigned long long)agl_gmx_bist.u64);
537}
538
539struct octeon_mgmt_cam_state {
540 u64 cam[6];
541 u64 cam_mask;
542 int cam_index;
543};
544
545static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
546 unsigned char *addr)
547{
548 int i;
549
550 for (i = 0; i < 6; i++)
551 cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
552 cs->cam_mask |= (1ULL << cs->cam_index);
553 cs->cam_index++;
554}
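/* Example: the first address added (cam_index == 0) lands in bits 7:0 of
 * cam[0]..cam[5] and sets bit 0 of cam_mask; the next address occupies
 * bits 15:8, and so on.  Each CAMn register therefore holds octet n of
 * up to eight filtered addresses.
 */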
555
556static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
557{
558 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -0700559 union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
560 union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
561 unsigned long flags;
562 unsigned int prev_packet_enable;
563 unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
564 unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
565 struct octeon_mgmt_cam_state cam_state;
Jiri Pirko22bedad32010-04-01 21:22:57 +0000566 struct netdev_hw_addr *ha;
David Daneyd6aa60a2009-10-14 12:04:41 -0700567 int available_cam_entries;
568
569 memset(&cam_state, 0, sizeof(cam_state));
570
David Daney62538d22010-05-05 13:03:08 +0000571 if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
David Daneyd6aa60a2009-10-14 12:04:41 -0700572 cam_mode = 0;
573 available_cam_entries = 8;
574 } else {
David Daneya0ce9b12012-08-21 11:45:12 -0700575 /* One CAM entry is used for the primary address, leaving seven
David Daneyd6aa60a2009-10-14 12:04:41 -0700576 * for the secondary addresses.
577 */
David Daney62538d22010-05-05 13:03:08 +0000578 available_cam_entries = 7 - netdev->uc.count;
David Daneyd6aa60a2009-10-14 12:04:41 -0700579 }
580
581 if (netdev->flags & IFF_MULTICAST) {
Jiri Pirko4cd24ea2010-02-08 04:30:35 +0000582 if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
583 netdev_mc_count(netdev) > available_cam_entries)
David Daney62538d22010-05-05 13:03:08 +0000584 multicast_mode = 2; /* 2 - Accept all multicast. */
David Daneyd6aa60a2009-10-14 12:04:41 -0700585 else
586 multicast_mode = 0; /* 0 - Use CAM. */
587 }
588
589 if (cam_mode == 1) {
590 /* Add primary address. */
591 octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
David Daney62538d22010-05-05 13:03:08 +0000592 netdev_for_each_uc_addr(ha, netdev)
593 octeon_mgmt_cam_state_add(&cam_state, ha->addr);
David Daneyd6aa60a2009-10-14 12:04:41 -0700594 }
595 if (multicast_mode == 0) {
Jiri Pirko22bedad32010-04-01 21:22:57 +0000596 netdev_for_each_mc_addr(ha, netdev)
597 octeon_mgmt_cam_state_add(&cam_state, ha->addr);
David Daneyd6aa60a2009-10-14 12:04:41 -0700598 }
599
David Daneyd6aa60a2009-10-14 12:04:41 -0700600 spin_lock_irqsave(&p->lock, flags);
601
602 /* Disable packet I/O. */
David Daney368bec02012-07-05 18:12:39 +0200603 agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
David Daneyd6aa60a2009-10-14 12:04:41 -0700604 prev_packet_enable = agl_gmx_prtx.s.en;
605 agl_gmx_prtx.s.en = 0;
David Daney368bec02012-07-05 18:12:39 +0200606 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -0700607
David Daneyd6aa60a2009-10-14 12:04:41 -0700608 adr_ctl.u64 = 0;
609 adr_ctl.s.cam_mode = cam_mode;
610 adr_ctl.s.mcst = multicast_mode;
611 adr_ctl.s.bcst = 1; /* Allow broadcast */
612
David Daney368bec02012-07-05 18:12:39 +0200613 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -0700614
David Daney368bec02012-07-05 18:12:39 +0200615 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]);
616 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]);
617 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]);
618 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]);
619 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]);
620 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]);
621 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask);
David Daneyd6aa60a2009-10-14 12:04:41 -0700622
623 /* Restore packet I/O. */
624 agl_gmx_prtx.s.en = prev_packet_enable;
David Daney368bec02012-07-05 18:12:39 +0200625 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -0700626
627 spin_unlock_irqrestore(&p->lock, flags);
628}
629
630static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
631{
David Daneyf3212382012-08-21 11:45:10 -0700632 int r = eth_mac_addr(netdev, addr);
David Daneyd6aa60a2009-10-14 12:04:41 -0700633
David Daneyf3212382012-08-21 11:45:10 -0700634 if (r)
635 return r;
David Daneyd6aa60a2009-10-14 12:04:41 -0700636
637 octeon_mgmt_set_rx_filtering(netdev);
638
639 return 0;
640}
641
642static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
643{
644 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -0700645 int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
646
David Daneya0ce9b12012-08-21 11:45:12 -0700647 /* Limit the MTU to make sure the ethernet packets are between
David Daneyd6aa60a2009-10-14 12:04:41 -0700648 * 64 bytes and 16383 bytes.
649 */
650 if (size_without_fcs < 64 || size_without_fcs > 16383) {
651 dev_warn(p->dev, "MTU must be between %d and %d.\n",
652 64 - OCTEON_MGMT_RX_HEADROOM,
653 16383 - OCTEON_MGMT_RX_HEADROOM);
654 return -EINVAL;
655 }
656
657 netdev->mtu = new_mtu;
658
David Daney368bec02012-07-05 18:12:39 +0200659 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs);
660 cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
David Daneyd6aa60a2009-10-14 12:04:41 -0700661 (size_without_fcs + 7) & 0xfff8);
662
663 return 0;
664}
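/* For reference, OCTEON_MGMT_RX_HEADROOM is ETH_HLEN + ETH_FCS_LEN +
 * VLAN_HLEN = 14 + 4 + 4 = 22 bytes, so the check above accepts MTUs
 * from 42 to 16361, the same limits the warning prints.
 */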
665
666static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
667{
668 struct net_device *netdev = dev_id;
669 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -0700670 union cvmx_mixx_isr mixx_isr;
671
David Daney368bec02012-07-05 18:12:39 +0200672 mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);
David Daneyd6aa60a2009-10-14 12:04:41 -0700673
674 /* Clear any pending interrupts */
David Daney368bec02012-07-05 18:12:39 +0200675 cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
676 cvmx_read_csr(p->mix + MIX_ISR);
David Daneyd6aa60a2009-10-14 12:04:41 -0700677
678 if (mixx_isr.s.irthresh) {
679 octeon_mgmt_disable_rx_irq(p);
680 napi_schedule(&p->napi);
681 }
682 if (mixx_isr.s.orthresh) {
683 octeon_mgmt_disable_tx_irq(p);
684 tasklet_schedule(&p->tx_clean_tasklet);
685 }
686
687 return IRQ_HANDLED;
688}
689
Chad Reese3d305852012-08-21 11:45:07 -0700690static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
691 struct ifreq *rq, int cmd)
692{
693 struct octeon_mgmt *p = netdev_priv(netdev);
694 struct hwtstamp_config config;
695 union cvmx_mio_ptp_clock_cfg ptp;
696 union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
697 bool have_hw_timestamps = false;
698
699 if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
700 return -EFAULT;
701
702 if (config.flags) /* reserved for future extensions */
703 return -EINVAL;
704
705	/* Check whether the hardware supports timestamps */
706 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
707 /* Get the current state of the PTP clock */
708 ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
709 if (!ptp.s.ext_clk_en) {
710 /* The clock has not been configured to use an
711 * external source. Program it to use the main clock
712 * reference.
713 */
714 u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate();
715 if (!ptp.s.ptp_en)
716 cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
717 pr_info("PTP Clock: Using sclk reference at %lld Hz\n",
718 (NSEC_PER_SEC << 32) / clock_comp);
719 } else {
720 /* The clock is already programmed to use a GPIO */
721 u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP);
722 pr_info("PTP Clock: Using GPIO %d at %lld Hz\n",
723 ptp.s.ext_clk_in,
724 (NSEC_PER_SEC << 32) / clock_comp);
725 }
726
727 /* Enable the clock if it wasn't done already */
728 if (!ptp.s.ptp_en) {
729 ptp.s.ptp_en = 1;
730 cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
731 }
732 have_hw_timestamps = true;
733 }
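	/* CLOCK_COMP holds the coprocessor-clock period in nanoseconds as
	 * a 32.32 fixed-point value, which is what the formula above
	 * computes.  As an illustration only: with a 500 MHz io clock,
	 * clock_comp = (1000000000 << 32) / 500000000 = 2 << 32, i.e.
	 * 2.0 ns per tick, and the pr_info() arithmetic recovers the
	 * 500000000 Hz rate.
	 */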
734
735 if (!have_hw_timestamps)
736 return -EINVAL;
737
738 switch (config.tx_type) {
739 case HWTSTAMP_TX_OFF:
740 case HWTSTAMP_TX_ON:
741 break;
742 default:
743 return -ERANGE;
744 }
745
746 switch (config.rx_filter) {
747 case HWTSTAMP_FILTER_NONE:
748 p->has_rx_tstamp = false;
749 rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
750 rxx_frm_ctl.s.ptp_mode = 0;
751 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
752 break;
753 case HWTSTAMP_FILTER_ALL:
754 case HWTSTAMP_FILTER_SOME:
755 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
756 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
757 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
758 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
759 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
760 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
761 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
762 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
763 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
764 case HWTSTAMP_FILTER_PTP_V2_EVENT:
765 case HWTSTAMP_FILTER_PTP_V2_SYNC:
766 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
767 p->has_rx_tstamp = have_hw_timestamps;
768 config.rx_filter = HWTSTAMP_FILTER_ALL;
769 if (p->has_rx_tstamp) {
770 rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
771 rxx_frm_ctl.s.ptp_mode = 1;
772 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
773 }
774 break;
775 default:
776 return -ERANGE;
777 }
778
779 if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
780 return -EFAULT;
781
782 return 0;
783}
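/* A minimal userspace sketch of exercising the handler above (the socket
 * fd and the "mgmt0" interface name are illustrative):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = {};
 *	strncpy(ifr.ifr_name, "mgmt0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return cfg.rx_filter reports what was actually enabled
 * (HWTSTAMP_FILTER_ALL here, since the hardware timestamps every
 * received frame once ptp_mode is set).
 */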
784
David Daneyd6aa60a2009-10-14 12:04:41 -0700785static int octeon_mgmt_ioctl(struct net_device *netdev,
786 struct ifreq *rq, int cmd)
787{
788 struct octeon_mgmt *p = netdev_priv(netdev);
789
Chad Reese3d305852012-08-21 11:45:07 -0700790 switch (cmd) {
791 case SIOCSHWTSTAMP:
792 return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
793 default:
794 if (p->phydev)
795 return phy_mii_ioctl(p->phydev, rq, cmd);
David Daneyd6aa60a2009-10-14 12:04:41 -0700796 return -EINVAL;
Chad Reese3d305852012-08-21 11:45:07 -0700797 }
David Daneyd6aa60a2009-10-14 12:04:41 -0700798}
David Daneyd6aa60a2009-10-14 12:04:41 -0700799
David Daneyeeae05a2012-08-21 11:45:06 -0700800static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
801{
802 union cvmx_agl_gmx_prtx_cfg prtx_cfg;
David Daneyd6aa60a2009-10-14 12:04:41 -0700803
David Daneyeeae05a2012-08-21 11:45:06 -0700804 /* Disable GMX before we make any changes. */
805 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
806 prtx_cfg.s.en = 0;
807 prtx_cfg.s.tx_en = 0;
808 prtx_cfg.s.rx_en = 0;
809 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
810
811 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
812 int i;
813 for (i = 0; i < 10; i++) {
814 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
815 if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1)
816 break;
817 mdelay(1);
819 }
820 }
821}
822
823static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
824{
825 union cvmx_agl_gmx_prtx_cfg prtx_cfg;
826
827 /* Restore the GMX enable state only if link is set */
828 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
829 prtx_cfg.s.tx_en = 1;
830 prtx_cfg.s.rx_en = 1;
831 prtx_cfg.s.en = 1;
832 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
833}
834
835static void octeon_mgmt_update_link(struct octeon_mgmt *p)
836{
837 union cvmx_agl_gmx_prtx_cfg prtx_cfg;
838
839 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
840
841 if (!p->phydev->link)
842 prtx_cfg.s.duplex = 1;
843 else
844 prtx_cfg.s.duplex = p->phydev->duplex;
845
846 switch (p->phydev->speed) {
847 case 10:
848 prtx_cfg.s.speed = 0;
849 prtx_cfg.s.slottime = 0;
850
851 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
852 prtx_cfg.s.burst = 1;
853 prtx_cfg.s.speed_msb = 1;
854 }
855 break;
856 case 100:
857 prtx_cfg.s.speed = 0;
858 prtx_cfg.s.slottime = 0;
859
860 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
861 prtx_cfg.s.burst = 1;
862 prtx_cfg.s.speed_msb = 0;
863 }
864 break;
865 case 1000:
866 /* 1000 MBits is only supported on 6XXX chips */
867 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
868 prtx_cfg.s.speed = 1;
869 prtx_cfg.s.speed_msb = 0;
870 /* Only matters for half-duplex */
871 prtx_cfg.s.slottime = 1;
872 prtx_cfg.s.burst = p->phydev->duplex;
873 }
874 break;
875 case 0: /* No link */
876 default:
877 break;
878 }
879
880 /* Write the new GMX setting with the port still disabled. */
881 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
882
883 /* Read GMX CFG again to make sure the config is completed. */
884 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
885
886 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
887 union cvmx_agl_gmx_txx_clk agl_clk;
888 union cvmx_agl_prtx_ctl prtx_ctl;
889
890 prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
891 agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
892 /* MII (both speeds) and RGMII 1000 speed. */
893 agl_clk.s.clk_cnt = 1;
894 if (prtx_ctl.s.mode == 0) { /* RGMII mode */
895 if (p->phydev->speed == 10)
896 agl_clk.s.clk_cnt = 50;
897 else if (p->phydev->speed == 100)
898 agl_clk.s.clk_cnt = 5;
899 }
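		/* With the 125 MHz AGL reference these divisors give the
		 * standard RGMII TX clocks: 125 / 50 = 2.5 MHz for 10 Mbps,
		 * 125 / 5 = 25 MHz for 100 Mbps and 125 / 1 = 125 MHz for
		 * 1000 Mbps (clk_cnt stays 1 in MII mode).
		 */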
900 cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
901 }
David Daneyd6aa60a2009-10-14 12:04:41 -0700902}
903
904static void octeon_mgmt_adjust_link(struct net_device *netdev)
905{
906 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -0700907 unsigned long flags;
908 int link_changed = 0;
909
David Daneyeeae05a2012-08-21 11:45:06 -0700910 if (!p->phydev)
911 return;
912
David Daneyd6aa60a2009-10-14 12:04:41 -0700913 spin_lock_irqsave(&p->lock, flags);
David Daneyeeae05a2012-08-21 11:45:06 -0700914
915
916 if (!p->phydev->link && p->last_link)
917 link_changed = -1;
918
919 if (p->phydev->link
920 && (p->last_duplex != p->phydev->duplex
921 || p->last_link != p->phydev->link
922 || p->last_speed != p->phydev->speed)) {
923 octeon_mgmt_disable_link(p);
924 link_changed = 1;
925 octeon_mgmt_update_link(p);
926 octeon_mgmt_enable_link(p);
David Daneyd6aa60a2009-10-14 12:04:41 -0700927 }
David Daneyeeae05a2012-08-21 11:45:06 -0700928
David Daneyd6aa60a2009-10-14 12:04:41 -0700929 p->last_link = p->phydev->link;
David Daneyeeae05a2012-08-21 11:45:06 -0700930 p->last_speed = p->phydev->speed;
931 p->last_duplex = p->phydev->duplex;
932
David Daneyd6aa60a2009-10-14 12:04:41 -0700933 spin_unlock_irqrestore(&p->lock, flags);
934
935 if (link_changed != 0) {
936 if (link_changed > 0) {
David Daneyd6aa60a2009-10-14 12:04:41 -0700937 pr_info("%s: Link is up - %d/%s\n", netdev->name,
938 p->phydev->speed,
939 DUPLEX_FULL == p->phydev->duplex ?
940 "Full" : "Half");
941 } else {
David Daneyd6aa60a2009-10-14 12:04:41 -0700942 pr_info("%s: Link is down\n", netdev->name);
943 }
944 }
945}
946
947static int octeon_mgmt_init_phy(struct net_device *netdev)
948{
949 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -0700950
David Daney368bec02012-07-05 18:12:39 +0200951 if (octeon_is_simulation() || p->phy_np == NULL) {
David Daneyd6aa60a2009-10-14 12:04:41 -0700952 /* No PHYs in the simulator. */
953 netif_carrier_on(netdev);
954 return 0;
955 }
956
David Daney368bec02012-07-05 18:12:39 +0200957 p->phydev = of_phy_connect(netdev, p->phy_np,
958 octeon_mgmt_adjust_link, 0,
959 PHY_INTERFACE_MODE_MII);
David Daneyd6aa60a2009-10-14 12:04:41 -0700960
Wei Yongjundf555b62012-09-27 19:04:21 +0000961 if (!p->phydev)
David Daneyeeae05a2012-08-21 11:45:06 -0700962 return -ENODEV;
David Daneyd6aa60a2009-10-14 12:04:41 -0700963
964 return 0;
965}
966
967static int octeon_mgmt_open(struct net_device *netdev)
968{
969 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -0700970 union cvmx_mixx_ctl mix_ctl;
971 union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
972 union cvmx_mixx_oring1 oring1;
973 union cvmx_mixx_iring1 iring1;
David Daneyd6aa60a2009-10-14 12:04:41 -0700974 union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
975 union cvmx_mixx_irhwm mix_irhwm;
976 union cvmx_mixx_orhwm mix_orhwm;
977 union cvmx_mixx_intena mix_intena;
978 struct sockaddr sa;
979
980 /* Allocate ring buffers. */
981 p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
982 GFP_KERNEL);
983 if (!p->tx_ring)
984 return -ENOMEM;
985 p->tx_ring_handle =
986 dma_map_single(p->dev, p->tx_ring,
987 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
988 DMA_BIDIRECTIONAL);
989 p->tx_next = 0;
990 p->tx_next_clean = 0;
991 p->tx_current_fill = 0;
992
993
994 p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
995 GFP_KERNEL);
996 if (!p->rx_ring)
997 goto err_nomem;
998 p->rx_ring_handle =
999 dma_map_single(p->dev, p->rx_ring,
1000 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
1001 DMA_BIDIRECTIONAL);
1002
1003 p->rx_next = 0;
1004 p->rx_next_fill = 0;
1005 p->rx_current_fill = 0;
1006
1007 octeon_mgmt_reset_hw(p);
1008
David Daney368bec02012-07-05 18:12:39 +02001009 mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
David Daneyd6aa60a2009-10-14 12:04:41 -07001010
1011 /* Bring it out of reset if needed. */
1012 if (mix_ctl.s.reset) {
1013 mix_ctl.s.reset = 0;
David Daney368bec02012-07-05 18:12:39 +02001014 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -07001015 do {
David Daney368bec02012-07-05 18:12:39 +02001016 mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
David Daneyd6aa60a2009-10-14 12:04:41 -07001017 } while (mix_ctl.s.reset);
1018 }
1019
David Daneyeeae05a2012-08-21 11:45:06 -07001020 if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
1021 agl_gmx_inf_mode.u64 = 0;
1022 agl_gmx_inf_mode.s.en = 1;
1023 cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
1024 }
1025 if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
1026 || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
David Daneya0ce9b12012-08-21 11:45:12 -07001027 /* Force compensation values, as they are not
David Daneyeeae05a2012-08-21 11:45:06 -07001028 * determined properly by HW
1029 */
1030 union cvmx_agl_gmx_drv_ctl drv_ctl;
1031
1032 drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
1033 if (p->port) {
1034 drv_ctl.s.byp_en1 = 1;
1035 drv_ctl.s.nctl1 = 6;
1036 drv_ctl.s.pctl1 = 6;
1037 } else {
1038 drv_ctl.s.byp_en = 1;
1039 drv_ctl.s.nctl = 6;
1040 drv_ctl.s.pctl = 6;
1041 }
1042 cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
1043 }
David Daneyd6aa60a2009-10-14 12:04:41 -07001044
1045 oring1.u64 = 0;
1046 oring1.s.obase = p->tx_ring_handle >> 3;
1047 oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
David Daney368bec02012-07-05 18:12:39 +02001048 cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -07001049
1050 iring1.u64 = 0;
1051 iring1.s.ibase = p->rx_ring_handle >> 3;
1052 iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
David Daney368bec02012-07-05 18:12:39 +02001053 cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);
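	/* The ring base addresses are programmed in units of 8 bytes
	 * (handle >> 3), so the rings must be at least 8-byte aligned;
	 * the kzalloc() allocations above satisfy that.
	 */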
David Daneyd6aa60a2009-10-14 12:04:41 -07001054
David Daneyd6aa60a2009-10-14 12:04:41 -07001055 memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
1056 octeon_mgmt_set_mac_address(netdev, &sa);
1057
1058 octeon_mgmt_change_mtu(netdev, netdev->mtu);
1059
David Daneya0ce9b12012-08-21 11:45:12 -07001060 /* Enable the port HW. Packets are not allowed until
David Daneyd6aa60a2009-10-14 12:04:41 -07001061 * the GMX side of the port is enabled in octeon_mgmt_enable_link().
1062 */
1063 mix_ctl.u64 = 0;
1064 mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */
1065 mix_ctl.s.en = 1; /* Enable the port */
1066 mix_ctl.s.nbtarb = 0; /* Arbitration mode */
1067 /* MII CB-request FIFO programmable high watermark */
1068 mix_ctl.s.mrq_hwm = 1;
David Daneyeeae05a2012-08-21 11:45:06 -07001069#ifdef __LITTLE_ENDIAN
1070 mix_ctl.s.lendian = 1;
1071#endif
David Daney368bec02012-07-05 18:12:39 +02001072 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -07001073
David Daneyeeae05a2012-08-21 11:45:06 -07001074 /* Read the PHY to find the mode of the interface. */
1075 if (octeon_mgmt_init_phy(netdev)) {
1076 dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
1077 goto err_noirq;
1078 }
David Daneyd6aa60a2009-10-14 12:04:41 -07001079
David Daneyeeae05a2012-08-21 11:45:06 -07001080 /* Set the mode of the interface, RGMII/MII. */
1081 if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) {
1082 union cvmx_agl_prtx_ctl agl_prtx_ctl;
1083 int rgmii_mode = (p->phydev->supported &
1084 (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;
1085
1086 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1087 agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
1088 cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
1089
1090 /* MII clock counts are based on the 125 MHz
1091 * reference, which has an 8 ns period, so our delays
1092 * need to be multiplied by this factor.
1093 */
1094#define NS_PER_PHY_CLK 8
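		/* e.g. the DLL-lock wait below is 256 * 8 ns, about 2 us,
		 * and the compensation wait is 1040 * 8 ns, about 8.3 us.
		 */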
1095
1096 /* Take the DLL and clock tree out of reset */
1097 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1098 agl_prtx_ctl.s.clkrst = 0;
1099 if (rgmii_mode) {
1100 agl_prtx_ctl.s.dllrst = 0;
1101 agl_prtx_ctl.s.clktx_byp = 0;
David Daneyd6aa60a2009-10-14 12:04:41 -07001102 }
David Daneyeeae05a2012-08-21 11:45:06 -07001103 cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
1104 cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */
1105
1106 /* Wait for the DLL to lock. External 125 MHz
1107 * reference clock must be stable at this point.
1108 */
1109 ndelay(256 * NS_PER_PHY_CLK);
1110
1111 /* Enable the interface */
1112 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1113 agl_prtx_ctl.s.enable = 1;
1114 cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
1115
1116 /* Read the value back to force the previous write */
1117 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1118
1119 /* Enable the compensation controller */
1120 agl_prtx_ctl.s.comp = 1;
1121 agl_prtx_ctl.s.drv_byp = 0;
1122 cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
1123 /* Force write out before wait. */
1124 cvmx_read_csr(p->agl_prt_ctl);
1125
1126 /* Wait for the compensation state to lock. */
1127 ndelay(1040 * NS_PER_PHY_CLK);
1128
David Daney906996d2013-06-19 17:40:19 -07001129 /* Default Interframe Gaps are too small. Recommended
1130 * workaround is:
1131 *
1132 * AGL_GMX_TX_IFG[IFG1]=14
1133 * AGL_GMX_TX_IFG[IFG2]=10
David Daneyeeae05a2012-08-21 11:45:06 -07001134 */
David Daney906996d2013-06-19 17:40:19 -07001135 cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
David Daneyd6aa60a2009-10-14 12:04:41 -07001136 }
1137
1138 octeon_mgmt_rx_fill_ring(netdev);
1139
1140 /* Clear statistics. */
1141 /* Clear on read. */
David Daney368bec02012-07-05 18:12:39 +02001142 cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
1143 cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
1144 cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);
David Daneyd6aa60a2009-10-14 12:04:41 -07001145
David Daney368bec02012-07-05 18:12:39 +02001146 cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
1147 cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
1148 cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);
David Daneyd6aa60a2009-10-14 12:04:41 -07001149
1150 /* Clear any pending interrupts */
David Daney368bec02012-07-05 18:12:39 +02001151 cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));
David Daneyd6aa60a2009-10-14 12:04:41 -07001152
1153 if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
1154 netdev)) {
1155 dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
1156 goto err_noirq;
1157 }
1158
1159 /* Interrupt every single RX packet */
1160 mix_irhwm.u64 = 0;
1161 mix_irhwm.s.irhwm = 0;
David Daney368bec02012-07-05 18:12:39 +02001162 cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -07001163
David Daneyb635e062010-05-05 13:03:11 +00001164 /* Interrupt when we have 1 or more packets to clean. */
David Daneyd6aa60a2009-10-14 12:04:41 -07001165 mix_orhwm.u64 = 0;
David Daneyeeae05a2012-08-21 11:45:06 -07001166 mix_orhwm.s.orhwm = 0;
David Daney368bec02012-07-05 18:12:39 +02001167 cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -07001168
1169 /* Enable receive and transmit interrupts */
1170 mix_intena.u64 = 0;
1171 mix_intena.s.ithena = 1;
1172 mix_intena.s.othena = 1;
David Daney368bec02012-07-05 18:12:39 +02001173 cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -07001174
David Daneyd6aa60a2009-10-14 12:04:41 -07001175 /* Enable packet I/O. */
1176
1177 rxx_frm_ctl.u64 = 0;
Chad Reese3d305852012-08-21 11:45:07 -07001178 rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
David Daneyd6aa60a2009-10-14 12:04:41 -07001179 rxx_frm_ctl.s.pre_align = 1;
David Daneya0ce9b12012-08-21 11:45:12 -07001180 /* When set, disables the length check for non-min sized pkts
David Daneyd6aa60a2009-10-14 12:04:41 -07001181 * with padding in the client data.
1182 */
1183 rxx_frm_ctl.s.pad_len = 1;
1184 /* When set, disables the length check for VLAN pkts */
1185 rxx_frm_ctl.s.vlan_len = 1;
1186 /* When set, PREAMBLE checking is less strict */
1187 rxx_frm_ctl.s.pre_free = 1;
1188 /* Control Pause Frames can match station SMAC */
1189 rxx_frm_ctl.s.ctl_smac = 0;
1190 /* Control Pause Frames can match globally assign Multicast address */
1191 rxx_frm_ctl.s.ctl_mcst = 1;
1192 /* Forward pause information to TX block */
1193 rxx_frm_ctl.s.ctl_bck = 1;
1194 /* Drop Control Pause Frames */
1195 rxx_frm_ctl.s.ctl_drp = 1;
1196 /* Strip off the preamble */
1197 rxx_frm_ctl.s.pre_strp = 1;
David Daneya0ce9b12012-08-21 11:45:12 -07001198 /* This port is configured to send PREAMBLE+SFD to begin every
David Daneyd6aa60a2009-10-14 12:04:41 -07001199 * frame. GMX checks that the PREAMBLE is sent correctly.
1200 */
1201 rxx_frm_ctl.s.pre_chk = 1;
David Daney368bec02012-07-05 18:12:39 +02001202 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
David Daneyd6aa60a2009-10-14 12:04:41 -07001203
David Daneyeeae05a2012-08-21 11:45:06 -07001204 /* Configure the port duplex, speed and enables */
1205 octeon_mgmt_disable_link(p);
1206 if (p->phydev)
1207 octeon_mgmt_update_link(p);
1208 octeon_mgmt_enable_link(p);
David Daneyd6aa60a2009-10-14 12:04:41 -07001209
1210 p->last_link = 0;
David Daneyeeae05a2012-08-21 11:45:06 -07001211 p->last_speed = 0;
1212 /* The PHY is not present in the simulator. The carrier is enabled
1213 * while initializing the PHY for the simulator, so leave it enabled.
1214 */
1215 if (p->phydev) {
1216 netif_carrier_off(netdev);
1217 phy_start_aneg(p->phydev);
David Daneyd6aa60a2009-10-14 12:04:41 -07001218 }
1219
1220 netif_wake_queue(netdev);
1221 napi_enable(&p->napi);
1222
1223 return 0;
1224err_noirq:
1225 octeon_mgmt_reset_hw(p);
1226 dma_unmap_single(p->dev, p->rx_ring_handle,
1227 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
1228 DMA_BIDIRECTIONAL);
1229 kfree(p->rx_ring);
1230err_nomem:
1231 dma_unmap_single(p->dev, p->tx_ring_handle,
1232 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1233 DMA_BIDIRECTIONAL);
1234 kfree(p->tx_ring);
1235 return -ENOMEM;
1236}
1237
1238static int octeon_mgmt_stop(struct net_device *netdev)
1239{
1240 struct octeon_mgmt *p = netdev_priv(netdev);
1241
1242 napi_disable(&p->napi);
1243 netif_stop_queue(netdev);
1244
1245 if (p->phydev)
1246 phy_disconnect(p->phydev);
David Daneyeeae05a2012-08-21 11:45:06 -07001247 p->phydev = NULL;
David Daneyd6aa60a2009-10-14 12:04:41 -07001248
1249 netif_carrier_off(netdev);
1250
1251 octeon_mgmt_reset_hw(p);
1252
David Daneyd6aa60a2009-10-14 12:04:41 -07001253 free_irq(p->irq, netdev);
1254
1255 /* dma_unmap is a nop on Octeon, so just free everything. */
1256 skb_queue_purge(&p->tx_list);
1257 skb_queue_purge(&p->rx_list);
1258
1259 dma_unmap_single(p->dev, p->rx_ring_handle,
1260 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
1261 DMA_BIDIRECTIONAL);
1262 kfree(p->rx_ring);
1263
1264 dma_unmap_single(p->dev, p->tx_ring_handle,
1265 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1266 DMA_BIDIRECTIONAL);
1267 kfree(p->tx_ring);
1268
David Daneyd6aa60a2009-10-14 12:04:41 -07001269 return 0;
1270}
1271
1272static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
1273{
1274 struct octeon_mgmt *p = netdev_priv(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -07001275 union mgmt_port_ring_entry re;
1276 unsigned long flags;
David Daney4e4a4f12010-05-05 13:03:12 +00001277 int rv = NETDEV_TX_BUSY;
David Daneyd6aa60a2009-10-14 12:04:41 -07001278
1279 re.d64 = 0;
Chad Reese3d305852012-08-21 11:45:07 -07001280 re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
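	/* The tstamp bit set above mirrors SKBTX_HW_TSTAMP, which the
	 * networking core sets when the sending socket requested hardware
	 * TX timestamps; octeon_mgmt_clean_tx_buffers() reads the
	 * resulting timestamp back and reports it via skb_tstamp_tx().
	 */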
David Daneyd6aa60a2009-10-14 12:04:41 -07001281 re.s.len = skb->len;
1282 re.s.addr = dma_map_single(p->dev, skb->data,
1283 skb->len,
1284 DMA_TO_DEVICE);
1285
1286 spin_lock_irqsave(&p->tx_list.lock, flags);
1287
David Daney4e4a4f12010-05-05 13:03:12 +00001288 if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
1289 spin_unlock_irqrestore(&p->tx_list.lock, flags);
1290 netif_stop_queue(netdev);
1291 spin_lock_irqsave(&p->tx_list.lock, flags);
1292 }
1293
David Daneyd6aa60a2009-10-14 12:04:41 -07001294 if (unlikely(p->tx_current_fill >=
1295 ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
1296 spin_unlock_irqrestore(&p->tx_list.lock, flags);
David Daneyd6aa60a2009-10-14 12:04:41 -07001297 dma_unmap_single(p->dev, re.s.addr, re.s.len,
1298 DMA_TO_DEVICE);
David Daney4e4a4f12010-05-05 13:03:12 +00001299 goto out;
David Daneyd6aa60a2009-10-14 12:04:41 -07001300 }
1301
1302 __skb_queue_tail(&p->tx_list, skb);
1303
1304 /* Put it in the ring. */
1305 p->tx_ring[p->tx_next] = re.d64;
1306 p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
1307 p->tx_current_fill++;
1308
1309 spin_unlock_irqrestore(&p->tx_list.lock, flags);
1310
1311 dma_sync_single_for_device(p->dev, p->tx_ring_handle,
1312 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1313 DMA_BIDIRECTIONAL);
1314
1315 netdev->stats.tx_packets++;
1316 netdev->stats.tx_bytes += skb->len;
1317
1318 /* Ring the bell. */
David Daney368bec02012-07-05 18:12:39 +02001319 cvmx_write_csr(p->mix + MIX_ORING2, 1);
David Daneyd6aa60a2009-10-14 12:04:41 -07001320
David Daneyeeae05a2012-08-21 11:45:06 -07001321 netdev->trans_start = jiffies;
David Daney4e4a4f12010-05-05 13:03:12 +00001322 rv = NETDEV_TX_OK;
1323out:
David Daneyd6aa60a2009-10-14 12:04:41 -07001324 octeon_mgmt_update_tx_stats(netdev);
David Daney4e4a4f12010-05-05 13:03:12 +00001325 return rv;
David Daneyd6aa60a2009-10-14 12:04:41 -07001326}
1327
1328#ifdef CONFIG_NET_POLL_CONTROLLER
1329static void octeon_mgmt_poll_controller(struct net_device *netdev)
1330{
1331 struct octeon_mgmt *p = netdev_priv(netdev);
1332
1333 octeon_mgmt_receive_packets(p, 16);
1334 octeon_mgmt_update_rx_stats(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -07001335}
1336#endif
1337
1338static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
1339 struct ethtool_drvinfo *info)
1340{
Jiri Pirko7826d432013-01-06 00:44:26 +00001341 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1342 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1343 strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
1344 strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
David Daneyd6aa60a2009-10-14 12:04:41 -07001345 info->n_stats = 0;
1346 info->testinfo_len = 0;
1347 info->regdump_len = 0;
1348 info->eedump_len = 0;
1349}
1350
1351static int octeon_mgmt_get_settings(struct net_device *netdev,
1352 struct ethtool_cmd *cmd)
1353{
1354 struct octeon_mgmt *p = netdev_priv(netdev);
1355
1356 if (p->phydev)
1357 return phy_ethtool_gset(p->phydev, cmd);
1358
David Daneyf21105d2012-08-21 11:45:08 -07001359 return -EOPNOTSUPP;
David Daneyd6aa60a2009-10-14 12:04:41 -07001360}
1361
1362static int octeon_mgmt_set_settings(struct net_device *netdev,
1363 struct ethtool_cmd *cmd)
1364{
1365 struct octeon_mgmt *p = netdev_priv(netdev);
1366
1367 if (!capable(CAP_NET_ADMIN))
1368 return -EPERM;
1369
1370 if (p->phydev)
1371 return phy_ethtool_sset(p->phydev, cmd);
1372
David Daneyf21105d2012-08-21 11:45:08 -07001373 return -EOPNOTSUPP;
1374}
1375
1376static int octeon_mgmt_nway_reset(struct net_device *dev)
1377{
1378 struct octeon_mgmt *p = netdev_priv(dev);
1379
1380 if (!capable(CAP_NET_ADMIN))
1381 return -EPERM;
1382
1383 if (p->phydev)
1384 return phy_start_aneg(p->phydev);
1385
1386 return -EOPNOTSUPP;
David Daneyd6aa60a2009-10-14 12:04:41 -07001387}
1388
1389static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
1390 .get_drvinfo = octeon_mgmt_get_drvinfo,
David Daneyd6aa60a2009-10-14 12:04:41 -07001391 .get_settings = octeon_mgmt_get_settings,
David Daneyf21105d2012-08-21 11:45:08 -07001392 .set_settings = octeon_mgmt_set_settings,
1393 .nway_reset = octeon_mgmt_nway_reset,
1394 .get_link = ethtool_op_get_link,
David Daneyd6aa60a2009-10-14 12:04:41 -07001395};
1396
1397static const struct net_device_ops octeon_mgmt_ops = {
1398 .ndo_open = octeon_mgmt_open,
1399 .ndo_stop = octeon_mgmt_stop,
1400 .ndo_start_xmit = octeon_mgmt_xmit,
David Daneyeeae05a2012-08-21 11:45:06 -07001401 .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
David Daneyd6aa60a2009-10-14 12:04:41 -07001402 .ndo_set_mac_address = octeon_mgmt_set_mac_address,
David Daneyeeae05a2012-08-21 11:45:06 -07001403 .ndo_do_ioctl = octeon_mgmt_ioctl,
David Daneyd6aa60a2009-10-14 12:04:41 -07001404 .ndo_change_mtu = octeon_mgmt_change_mtu,
1405#ifdef CONFIG_NET_POLL_CONTROLLER
1406 .ndo_poll_controller = octeon_mgmt_poll_controller,
1407#endif
1408};
1409
Bill Pemberton5bc7ec72012-12-03 09:23:22 -05001410static int octeon_mgmt_probe(struct platform_device *pdev)
David Daneyd6aa60a2009-10-14 12:04:41 -07001411{
David Daneyd6aa60a2009-10-14 12:04:41 -07001412 struct net_device *netdev;
1413 struct octeon_mgmt *p;
David Daney368bec02012-07-05 18:12:39 +02001414 const __be32 *data;
1415 const u8 *mac;
1416 struct resource *res_mix;
1417 struct resource *res_agl;
David Daneyeeae05a2012-08-21 11:45:06 -07001418 struct resource *res_agl_prt_ctl;
David Daney368bec02012-07-05 18:12:39 +02001419 int len;
1420 int result;
David Daneyd6aa60a2009-10-14 12:04:41 -07001421
1422 netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
1423 if (netdev == NULL)
1424 return -ENOMEM;
1425
David Daney052958e2012-08-21 11:45:09 -07001426 SET_NETDEV_DEV(netdev, &pdev->dev);
1427
Jingoo Han8513fbd2013-05-23 00:52:31 +00001428 platform_set_drvdata(pdev, netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -07001429 p = netdev_priv(netdev);
1430 netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
1431 OCTEON_MGMT_NAPI_WEIGHT);
1432
1433 p->netdev = netdev;
1434 p->dev = &pdev->dev;
Chad Reese3d305852012-08-21 11:45:07 -07001435 p->has_rx_tstamp = false;
David Daneyd6aa60a2009-10-14 12:04:41 -07001436
David Daney368bec02012-07-05 18:12:39 +02001437 data = of_get_property(pdev->dev.of_node, "cell-index", &len);
1438 if (data && len == sizeof(*data)) {
1439 p->port = be32_to_cpup(data);
1440 } else {
1441 dev_err(&pdev->dev, "no 'cell-index' property\n");
1442 result = -ENXIO;
1443 goto err;
1444 }
1445
David Daneyd6aa60a2009-10-14 12:04:41 -07001446 snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);
1447
David Daney368bec02012-07-05 18:12:39 +02001448 result = platform_get_irq(pdev, 0);
1449 if (result < 0)
David Daneyd6aa60a2009-10-14 12:04:41 -07001450 goto err;
1451
David Daney368bec02012-07-05 18:12:39 +02001452 p->irq = result;
1453
1454 res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1455 if (res_mix == NULL) {
1456 dev_err(&pdev->dev, "no 'reg' resource\n");
1457 result = -ENXIO;
1458 goto err;
1459 }
1460
1461 res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1462 if (res_agl == NULL) {
1463 dev_err(&pdev->dev, "no 'reg' resource\n");
1464 result = -ENXIO;
1465 goto err;
1466 }
1467
David Daneyeeae05a2012-08-21 11:45:06 -07001468 res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
1469 if (res_agl_prt_ctl == NULL) {
1470 dev_err(&pdev->dev, "no 'reg' resource\n");
1471 result = -ENXIO;
1472 goto err;
1473 }
1474
David Daney368bec02012-07-05 18:12:39 +02001475 p->mix_phys = res_mix->start;
1476 p->mix_size = resource_size(res_mix);
1477 p->agl_phys = res_agl->start;
1478 p->agl_size = resource_size(res_agl);
David Daneyeeae05a2012-08-21 11:45:06 -07001479 p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
1480 p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);
David Daney368bec02012-07-05 18:12:39 +02001481
1482
1483 if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
1484 res_mix->name)) {
1485 dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1486 res_mix->name);
1487 result = -ENXIO;
1488 goto err;
1489 }
1490
1491 if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
1492 res_agl->name)) {
1493 result = -ENXIO;
1494 dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1495 res_agl->name);
1496 goto err;
1497 }
1498
David Daneyeeae05a2012-08-21 11:45:06 -07001499 if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
1500 p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
1501 result = -ENXIO;
1502 dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1503 res_agl_prt_ctl->name);
1504 goto err;
1505 }
David Daney368bec02012-07-05 18:12:39 +02001506
1507 p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
1508 p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
David Daneyeeae05a2012-08-21 11:45:06 -07001509 p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
1510 p->agl_prt_ctl_size);
David Daneyd6aa60a2009-10-14 12:04:41 -07001511 spin_lock_init(&p->lock);
1512
1513 skb_queue_head_init(&p->tx_list);
1514 skb_queue_head_init(&p->rx_list);
1515 tasklet_init(&p->tx_clean_tasklet,
1516 octeon_mgmt_clean_tx_tasklet, (unsigned long)p);
1517
Jiri Pirko01789342011-08-16 06:29:00 +00001518 netdev->priv_flags |= IFF_UNICAST_FLT;
1519
David Daneyd6aa60a2009-10-14 12:04:41 -07001520 netdev->netdev_ops = &octeon_mgmt_ops;
1521 netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
1522
David Daney368bec02012-07-05 18:12:39 +02001523 mac = of_get_mac_address(pdev->dev.of_node);
David Daneyd6aa60a2009-10-14 12:04:41 -07001524
Luka Perkov09ec0d02013-10-30 00:09:12 +01001525 if (mac)
David Daneyf3212382012-08-21 11:45:10 -07001526 memcpy(netdev->dev_addr, mac, ETH_ALEN);
Jiri Pirko15c6ff32013-01-01 03:30:17 +00001527 else
David Daneyf3212382012-08-21 11:45:10 -07001528 eth_hw_addr_random(netdev);
David Daneyd6aa60a2009-10-14 12:04:41 -07001529
David Daney368bec02012-07-05 18:12:39 +02001530 p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
1531
Russell King26741a62013-06-27 13:57:32 +01001532 result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1533 if (result)
1534 goto err;
David Daney368bec02012-07-05 18:12:39 +02001535
David Daneyeeae05a2012-08-21 11:45:06 -07001536 netif_carrier_off(netdev);
David Daney368bec02012-07-05 18:12:39 +02001537 result = register_netdev(netdev);
1538 if (result)
David Daneyd6aa60a2009-10-14 12:04:41 -07001539 goto err;
1540
1541 dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
1542 return 0;
David Daney368bec02012-07-05 18:12:39 +02001543
David Daneyd6aa60a2009-10-14 12:04:41 -07001544err:
1545 free_netdev(netdev);
David Daney368bec02012-07-05 18:12:39 +02001546 return result;
David Daneyd6aa60a2009-10-14 12:04:41 -07001547}
1548
Bill Pemberton5bc7ec72012-12-03 09:23:22 -05001549static int octeon_mgmt_remove(struct platform_device *pdev)
David Daneyd6aa60a2009-10-14 12:04:41 -07001550{
Jingoo Han8513fbd2013-05-23 00:52:31 +00001551 struct net_device *netdev = platform_get_drvdata(pdev);
David Daneyd6aa60a2009-10-14 12:04:41 -07001552
1553 unregister_netdev(netdev);
1554 free_netdev(netdev);
1555 return 0;
1556}
1557
David Daney368bec02012-07-05 18:12:39 +02001558static struct of_device_id octeon_mgmt_match[] = {
1559 {
1560 .compatible = "cavium,octeon-5750-mix",
1561 },
1562 {},
1563};
1564MODULE_DEVICE_TABLE(of, octeon_mgmt_match);
1565
David Daneyd6aa60a2009-10-14 12:04:41 -07001566static struct platform_driver octeon_mgmt_driver = {
1567 .driver = {
1568 .name = "octeon_mgmt",
1569 .owner = THIS_MODULE,
David Daney368bec02012-07-05 18:12:39 +02001570 .of_match_table = octeon_mgmt_match,
David Daneyd6aa60a2009-10-14 12:04:41 -07001571 },
1572 .probe = octeon_mgmt_probe,
Bill Pemberton5bc7ec72012-12-03 09:23:22 -05001573 .remove = octeon_mgmt_remove,
David Daneyd6aa60a2009-10-14 12:04:41 -07001574};
1575
1576extern void octeon_mdiobus_force_mod_depencency(void);
1577
1578static int __init octeon_mgmt_mod_init(void)
1579{
1580 /* Force our mdiobus driver module to be loaded first. */
1581 octeon_mdiobus_force_mod_depencency();
1582 return platform_driver_register(&octeon_mgmt_driver);
1583}
1584
1585static void __exit octeon_mgmt_mod_exit(void)
1586{
1587 platform_driver_unregister(&octeon_mgmt_driver);
1588}
1589
1590module_init(octeon_mgmt_mod_init);
1591module_exit(octeon_mgmt_mod_exit);
1592
1593MODULE_DESCRIPTION(DRV_DESCRIPTION);
1594MODULE_AUTHOR("David Daney");
1595MODULE_LICENSE("GPL");
1596MODULE_VERSION(DRV_VERSION);