/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009 Cavium Networks
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/capability.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/if_vlan.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/io.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-mixx-defs.h>
#include <asm/octeon/cvmx-agl-defs.h>

#define DRV_NAME "octeon_mgmt"
#define DRV_VERSION "2.0"
#define DRV_DESCRIPTION \
	"Cavium Networks Octeon MII (management) port Network Driver"

#define OCTEON_MGMT_NAPI_WEIGHT 16

/*
 * Ring sizes that are powers of two allow for more efficient modulo
 * operations.
 */
#define OCTEON_MGMT_RX_RING_SIZE 512
#define OCTEON_MGMT_TX_RING_SIZE 128

/* Allow 8 bytes for vlan and FCS. */
#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)

union mgmt_port_ring_entry {
	u64 d64;
	struct {
		u64 reserved_62_63:2;
		/* Length of the buffer/packet in bytes */
		u64 len:14;
		/* For TX, signals that the packet should be timestamped */
		u64 tstamp:1;
		/* The RX error code */
		u64 code:7;
#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
		/* Physical address of the buffer */
		u64 addr:40;
	} s;
};

#define MIX_ORING1 0x0
#define MIX_ORING2 0x8
#define MIX_IRING1 0x10
#define MIX_IRING2 0x18
#define MIX_CTL 0x20
#define MIX_IRHWM 0x28
#define MIX_IRCNT 0x30
#define MIX_ORHWM 0x38
#define MIX_ORCNT 0x40
#define MIX_ISR 0x48
#define MIX_INTENA 0x50
#define MIX_REMCNT 0x58
#define MIX_BIST 0x78

#define AGL_GMX_PRT_CFG 0x10
#define AGL_GMX_RX_FRM_CTL 0x18
#define AGL_GMX_RX_FRM_MAX 0x30
#define AGL_GMX_RX_JABBER 0x38
#define AGL_GMX_RX_STATS_CTL 0x50

#define AGL_GMX_RX_STATS_PKTS_DRP 0xb0
#define AGL_GMX_RX_STATS_OCTS_DRP 0xb8
#define AGL_GMX_RX_STATS_PKTS_BAD 0xc0

#define AGL_GMX_RX_ADR_CTL 0x100
#define AGL_GMX_RX_ADR_CAM_EN 0x108
#define AGL_GMX_RX_ADR_CAM0 0x180
#define AGL_GMX_RX_ADR_CAM1 0x188
#define AGL_GMX_RX_ADR_CAM2 0x190
#define AGL_GMX_RX_ADR_CAM3 0x198
#define AGL_GMX_RX_ADR_CAM4 0x1a0
#define AGL_GMX_RX_ADR_CAM5 0x1a8

#define AGL_GMX_TX_STATS_CTL 0x268
#define AGL_GMX_TX_CTL 0x270
#define AGL_GMX_TX_STAT0 0x280
#define AGL_GMX_TX_STAT1 0x288
#define AGL_GMX_TX_STAT2 0x290
#define AGL_GMX_TX_STAT3 0x298
#define AGL_GMX_TX_STAT4 0x2a0
#define AGL_GMX_TX_STAT5 0x2a8
#define AGL_GMX_TX_STAT6 0x2b0
#define AGL_GMX_TX_STAT7 0x2b8
#define AGL_GMX_TX_STAT8 0x2c0
#define AGL_GMX_TX_STAT9 0x2c8

struct octeon_mgmt {
	struct net_device *netdev;
	u64 mix;
	u64 agl;
	int port;
	int irq;
	u64 *tx_ring;
	dma_addr_t tx_ring_handle;
	unsigned int tx_next;
	unsigned int tx_next_clean;
	unsigned int tx_current_fill;
	/* The tx_list lock also protects the ring related variables */
	struct sk_buff_head tx_list;

	/* RX variables only touched in napi_poll. No locking necessary. */
	u64 *rx_ring;
	dma_addr_t rx_ring_handle;
	unsigned int rx_next;
	unsigned int rx_next_fill;
	unsigned int rx_current_fill;
	struct sk_buff_head rx_list;

	spinlock_t lock;
	unsigned int last_duplex;
	unsigned int last_link;
	struct device *dev;
	struct napi_struct napi;
	struct tasklet_struct tx_clean_tasklet;
	struct phy_device *phydev;
	struct device_node *phy_np;
	resource_size_t mix_phys;
	resource_size_t mix_size;
	resource_size_t agl_phys;
	resource_size_t agl_size;
};

static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
{
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
	mix_intena.s.ithena = enable ? 1 : 0;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
{
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
	mix_intena.s.othena = enable ? 1 : 0;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 1);
}

static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 0);
}

static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 1);
}

static inline void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 0);
}

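/*
 * Keep a few entries of slack so the fill level never reaches the
 * full ring size.
 */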
static unsigned int ring_max_fill(unsigned int ring_size)
{
	return ring_size - 8;
}

static unsigned int ring_size_to_bytes(unsigned int ring_size)
{
	return ring_size * sizeof(union mgmt_port_ring_entry);
}

static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
		unsigned int size;
		union mgmt_port_ring_entry re;
		struct sk_buff *skb;

		/* CN56XX pass 1 needs 8 bytes of padding. */
		size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;

		skb = netdev_alloc_skb(netdev, size);
		if (!skb)
			break;
		skb_reserve(skb, NET_IP_ALIGN);
		__skb_queue_tail(&p->rx_list, skb);

		re.d64 = 0;
		re.s.len = size;
		re.s.addr = dma_map_single(p->dev, skb->data,
					   size,
					   DMA_FROM_DEVICE);

		/* Put it in the ring. */
		p->rx_ring[p->rx_next_fill] = re.d64;
		dma_sync_single_for_device(p->dev, p->rx_ring_handle,
					   ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
					   DMA_BIDIRECTIONAL);
		p->rx_next_fill =
			(p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
		p->rx_current_fill++;
		/* Ring the bell. */
		cvmx_write_csr(p->mix + MIX_IRING2, 1);
	}
}

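/*
 * Reclaim TX ring entries the hardware has finished with: for each
 * completed entry reported in MIX_ORCNT, unmap the buffer, free the
 * skb and wake the queue if it had been stopped.
 */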
static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
	union cvmx_mixx_orcnt mix_orcnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	int cleaned = 0;
	unsigned long flags;

	mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	while (mix_orcnt.s.orcnt) {
		spin_lock_irqsave(&p->tx_list.lock, flags);

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);

		if (mix_orcnt.s.orcnt == 0) {
			spin_unlock_irqrestore(&p->tx_list.lock, flags);
			break;
		}

		dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
					ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
					DMA_BIDIRECTIONAL);

		re.d64 = p->tx_ring[p->tx_next_clean];
		p->tx_next_clean =
			(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
		skb = __skb_dequeue(&p->tx_list);

		mix_orcnt.u64 = 0;
		mix_orcnt.s.orcnt = 1;

		/* Acknowledge to hardware that we have the buffer. */
		cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
		p->tx_current_fill--;

		spin_unlock_irqrestore(&p->tx_list.lock, flags);

		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
		cleaned++;

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	}

	if (cleaned && netif_queue_stopped(p->netdev))
		netif_wake_queue(p->netdev);
}

static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
{
	struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
	octeon_mgmt_clean_tx_buffers(p);
	octeon_mgmt_enable_tx_irq(p);
}

static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	unsigned long flags;
	u64 drop, bad;

	/* These reads also clear the count registers. */
	drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
	bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);

	if (drop || bad) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.rx_errors += bad;
		netdev->stats.rx_dropped += drop;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	unsigned long flags;

	union cvmx_agl_gmx_txx_stat0 s0;
	union cvmx_agl_gmx_txx_stat1 s1;

	/* These reads also clear the count registers. */
	s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0);
	s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1);

	if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
		netdev->stats.collisions += s1.s.scol + s1.s.mcol;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

/*
 * Dequeue a receive skb and its corresponding ring entry. The ring
 * entry is returned, *pskb is updated to point to the skb.
 */
static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
					 struct sk_buff **pskb)
{
	union mgmt_port_ring_entry re;

	dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
				ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
				DMA_BIDIRECTIONAL);

	re.d64 = p->rx_ring[p->rx_next];
	p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
	p->rx_current_fill--;
	*pskb = __skb_dequeue(&p->rx_list);

	dma_unmap_single(p->dev, re.s.addr,
			 ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
			 DMA_FROM_DEVICE);

	return re.d64;
}


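/*
 * Process one RX ring entry. Good packets are passed up the stack;
 * packets split across buffers (RING_ENTRY_CODE_MORE) are reassembled
 * by copying; anything else is dropped. Returns 0 if a packet was
 * delivered to the stack.
 */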
static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
{
	struct net_device *netdev = p->netdev;
	union cvmx_mixx_ircnt mix_ircnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	struct sk_buff *skb2;
	struct sk_buff *skb_new;
	union mgmt_port_ring_entry re2;
	int rc = 1;


	re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
	if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
		/* A good packet, send it up. */
		skb_put(skb, re.s.len);
good:
		skb->protocol = eth_type_trans(skb, netdev);
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);
		rc = 0;
	} else if (re.s.code == RING_ENTRY_CODE_MORE) {
		/*
		 * Packet split across skbs. This can happen if we
		 * increase the MTU. Buffers that are already in the
		 * rx ring can then end up being too small. As the rx
		 * ring is refilled, buffers sized for the new MTU
		 * will be used and we should go back to the normal
		 * non-split case.
		 */
		skb_put(skb, re.s.len);
		do {
			re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
			if (re2.s.code != RING_ENTRY_CODE_MORE
			    && re2.s.code != RING_ENTRY_CODE_DONE)
				goto split_error;
			skb_put(skb2, re2.s.len);
			skb_new = skb_copy_expand(skb, 0, skb2->len,
						  GFP_ATOMIC);
			if (!skb_new)
				goto split_error;
			if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
					  skb2->len))
				goto split_error;
			skb_put(skb_new, skb2->len);
			dev_kfree_skb_any(skb);
			dev_kfree_skb_any(skb2);
			skb = skb_new;
		} while (re2.s.code == RING_ENTRY_CODE_MORE);
		goto good;
	} else {
		/* Some other error, discard it. */
		dev_kfree_skb_any(skb);
		/*
		 * Error statistics are accumulated in
		 * octeon_mgmt_update_rx_stats.
		 */
	}
	goto done;
split_error:
	/* Discard the whole mess. */
	dev_kfree_skb_any(skb);
	dev_kfree_skb_any(skb2);
	while (re2.s.code == RING_ENTRY_CODE_MORE) {
		re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
		dev_kfree_skb_any(skb2);
	}
	netdev->stats.rx_errors++;

done:
	/* Tell the hardware we processed a packet. */
	mix_ircnt.u64 = 0;
	mix_ircnt.s.ircnt = 1;
	cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);
	return rc;
}

static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
{
	unsigned int work_done = 0;
	union cvmx_mixx_ircnt mix_ircnt;
	int rc;

	mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
	while (work_done < budget && mix_ircnt.s.ircnt) {

		rc = octeon_mgmt_receive_one(p);
		if (!rc)
			work_done++;

		/* Check for more packets. */
		mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
	}

	octeon_mgmt_rx_fill_ring(p->netdev);

	return work_done;
}

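/*
 * NAPI poll handler: receive up to budget packets, then re-enable the
 * RX interrupt once the ring has been drained.
 */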
static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
	struct net_device *netdev = p->netdev;
	unsigned int work_done = 0;

	work_done = octeon_mgmt_receive_packets(p, budget);

	if (work_done < budget) {
		/* We stopped because no more packets were available. */
		napi_complete(napi);
		octeon_mgmt_enable_rx_irq(p);
	}
	octeon_mgmt_update_rx_stats(netdev);

	return work_done;
}

/* Reset the hardware to clean state. */
static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
{
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_mixx_bist mix_bist;
	union cvmx_agl_gmx_bist agl_gmx_bist;

	mix_ctl.u64 = 0;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	do {
		mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
	} while (mix_ctl.s.busy);
	mix_ctl.s.reset = 1;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	cvmx_read_csr(p->mix + MIX_CTL);
	cvmx_wait(64);

	mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
	if (mix_bist.u64)
		dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
			 (unsigned long long)mix_bist.u64);

	agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
	if (agl_gmx_bist.u64)
		dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
			 (unsigned long long)agl_gmx_bist.u64);
}

struct octeon_mgmt_cam_state {
	u64 cam[6];
	u64 cam_mask;
	int cam_index;
};

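/*
 * Add one MAC address to the CAM state. Byte i of the address is
 * accumulated into cam[i] at the bit position selected by cam_index.
 */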
static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
				      unsigned char *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
	cs->cam_mask |= (1ULL << cs->cam_index);
	cs->cam_index++;
}

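/*
 * Program the address filter: build the CAM from the primary, unicast
 * and multicast addresses, falling back to promiscuous or
 * accept-all-multicast modes when the eight CAM entries are not enough.
 */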
static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
	union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
	unsigned long flags;
	unsigned int prev_packet_enable;
	unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
	unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
	struct octeon_mgmt_cam_state cam_state;
	struct netdev_hw_addr *ha;
	int available_cam_entries;

	memset(&cam_state, 0, sizeof(cam_state));

	if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
		cam_mode = 0;
		available_cam_entries = 8;
	} else {
		/*
		 * One CAM entry for the primary address, leaves seven
		 * for the secondary addresses.
		 */
		available_cam_entries = 7 - netdev->uc.count;
	}

	if (netdev->flags & IFF_MULTICAST) {
		if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
		    netdev_mc_count(netdev) > available_cam_entries)
			multicast_mode = 2; /* 2 - Accept all multicast. */
		else
			multicast_mode = 0; /* 0 - Use CAM. */
	}

	if (cam_mode == 1) {
		/* Add primary address. */
		octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
		netdev_for_each_uc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}
	if (multicast_mode == 0) {
		netdev_for_each_mc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}

	spin_lock_irqsave(&p->lock, flags);

	/* Disable packet I/O. */
	agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prev_packet_enable = agl_gmx_prtx.s.en;
	agl_gmx_prtx.s.en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	adr_ctl.u64 = 0;
	adr_ctl.s.cam_mode = cam_mode;
	adr_ctl.s.mcst = multicast_mode;
	adr_ctl.s.bcst = 1; /* Allow broadcast */

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64);

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask);

	/* Restore packet I/O. */
	agl_gmx_prtx.s.en = prev_packet_enable;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	spin_unlock_irqrestore(&p->lock, flags);
}

static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
{
	struct sockaddr *sa = addr;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);

	octeon_mgmt_set_rx_filtering(netdev);

	return 0;
}

static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;

	/*
	 * Limit the MTU to make sure the ethernet packets are between
	 * 64 bytes and 16383 bytes.
	 */
	if (size_without_fcs < 64 || size_without_fcs > 16383) {
		dev_warn(p->dev, "MTU must be between %d and %d.\n",
			 64 - OCTEON_MGMT_RX_HEADROOM,
			 16383 - OCTEON_MGMT_RX_HEADROOM);
		return -EINVAL;
	}

	netdev->mtu = new_mtu;

	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs);
	cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
		       (size_without_fcs + 7) & 0xfff8);

	return 0;
}

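/*
 * MIX interrupt handler: acknowledge the interrupt, then hand RX work
 * to NAPI and TX completion to the cleanup tasklet.
 */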
static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_isr mixx_isr;

	mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
	cvmx_read_csr(p->mix + MIX_ISR);

	if (mixx_isr.s.irthresh) {
		octeon_mgmt_disable_rx_irq(p);
		napi_schedule(&p->napi);
	}
	if (mixx_isr.s.orthresh) {
		octeon_mgmt_disable_tx_irq(p);
		tasklet_schedule(&p->tx_clean_tasklet);
	}

	return IRQ_HANDLED;
}

static int octeon_mgmt_ioctl(struct net_device *netdev,
			     struct ifreq *rq, int cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EINVAL;

	if (!p->phydev)
		return -EINVAL;

	return phy_mii_ioctl(p->phydev, rq, cmd);
}

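/*
 * PHY link-state callback: propagate duplex changes to AGL_GMX_PRT_CFG
 * and report carrier transitions.
 */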
static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
	unsigned long flags;
	int link_changed = 0;

	spin_lock_irqsave(&p->lock, flags);
	if (p->phydev->link) {
		if (!p->last_link)
			link_changed = 1;
		if (p->last_duplex != p->phydev->duplex) {
			p->last_duplex = p->phydev->duplex;
			prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
			prtx_cfg.s.duplex = p->phydev->duplex;
			cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
		}
	} else {
		if (p->last_link)
			link_changed = -1;
	}
	p->last_link = p->phydev->link;
	spin_unlock_irqrestore(&p->lock, flags);

	if (link_changed != 0) {
		if (link_changed > 0) {
			netif_carrier_on(netdev);
			pr_info("%s: Link is up - %d/%s\n", netdev->name,
				p->phydev->speed,
				DUPLEX_FULL == p->phydev->duplex ?
					"Full" : "Half");
		} else {
			netif_carrier_off(netdev);
			pr_info("%s: Link is down\n", netdev->name);
		}
	}
}

static int octeon_mgmt_init_phy(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (octeon_is_simulation() || p->phy_np == NULL) {
		/* No PHYs in the simulator. */
		netif_carrier_on(netdev);
		return 0;
	}

	p->phydev = of_phy_connect(netdev, p->phy_np,
				   octeon_mgmt_adjust_link, 0,
				   PHY_INTERFACE_MODE_MII);

	if (IS_ERR(p->phydev)) {
		p->phydev = NULL;
		return -1;
	}

	phy_start_aneg(p->phydev);

	return 0;
}

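/*
 * ndo_open handler: allocate and map the TX/RX rings, bring the
 * MIX/AGL blocks out of reset, program the ring bases and interrupt
 * watermarks, and attach the PHY.
 */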
static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Allocate ring buffers. */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->tx_ring)
		return -ENOMEM;
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;


	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->rx_ring)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);

	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);

	/* Bring it out of reset if needed. */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
		do {
			mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
		} while (mix_ctl.s.reset);
	}

	agl_gmx_inf_mode.u64 = 0;
	agl_gmx_inf_mode.s.en = 1;
	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

	oring1.u64 = 0;
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);

	/* Disable packet I/O. */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/*
	 * Enable the port HW. Packets are not allowed until
	 * cvmx_mgmt_port_enable() is called.
	 */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */
	mix_ctl.s.en = 1; /* Enable the port */
	mix_ctl.s.nbtarb = 0; /* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);

	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/*
		 * Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
		if (port) {
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
		} else {
			drv_ctl.s.byp_en = 1;
			drv_ctl.s.nctl = 6;
			drv_ctl.s.pctl = 6;
		}
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
	}

	octeon_mgmt_rx_fill_ring(netdev);

	/* Clear statistics. */
	/* Clear on read. */
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);

	cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));

	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
			netdev)) {
		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
		goto err_noirq;
	}

	/* Interrupt every single RX packet */
	mix_irhwm.u64 = 0;
	mix_irhwm.s.irhwm = 0;
	cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);

	/* Interrupt when we have 1 or more packets to clean. */
	mix_orhwm.u64 = 0;
	mix_orhwm.s.orhwm = 1;
	cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);

	/* Enable receive and transmit interrupts */
	mix_intena.u64 = 0;
	mix_intena.s.ithena = 1;
	mix_intena.s.othena = 1;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);


	/* Enable packet I/O. */

	rxx_frm_ctl.u64 = 0;
	rxx_frm_ctl.s.pre_align = 1;
	/*
	 * When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
	rxx_frm_ctl.s.pad_len = 1;
	/* When set, disables the length check for VLAN pkts */
	rxx_frm_ctl.s.vlan_len = 1;
	/* When set, PREAMBLE checking is less strict */
	rxx_frm_ctl.s.pre_free = 1;
	/* Control Pause Frames can match station SMAC */
	rxx_frm_ctl.s.ctl_smac = 0;
	/* Control Pause Frames can match globally assign Multicast address */
	rxx_frm_ctl.s.ctl_mcst = 1;
	/* Forward pause information to TX block */
	rxx_frm_ctl.s.ctl_bck = 1;
	/* Drop Control Pause Frames */
	rxx_frm_ctl.s.ctl_drp = 1;
	/* Strip off the preamble */
	rxx_frm_ctl.s.pre_strp = 1;
	/*
	 * This port is configured to send PREAMBLE+SFD to begin every
	 * frame. GMX checks that the PREAMBLE is sent correctly.
	 */
	rxx_frm_ctl.s.pre_chk = 1;
	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);

	/* Enable the AGL block */
	agl_gmx_inf_mode.u64 = 0;
	agl_gmx_inf_mode.s.en = 1;
	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

	/* Configure the port duplex and enables */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.tx_en = 1;
	prtx_cfg.s.rx_en = 1;
	prtx_cfg.s.en = 1;
	p->last_duplex = 1;
	prtx_cfg.s.duplex = p->last_duplex;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	p->last_link = 0;
	netif_carrier_off(netdev);

	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY.\n");
		goto err_noirq;
	}

	netif_wake_queue(netdev);
	napi_enable(&p->napi);

	return 0;
err_noirq:
	octeon_mgmt_reset_hw(p);
	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);
err_nomem:
	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);
	return -ENOMEM;
}

static int octeon_mgmt_stop(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	napi_disable(&p->napi);
	netif_stop_queue(netdev);

	if (p->phydev)
		phy_disconnect(p->phydev);

	netif_carrier_off(netdev);

	octeon_mgmt_reset_hw(p);

	free_irq(p->irq, netdev);

	/* dma_unmap is a nop on Octeon, so just free everything. */
	skb_queue_purge(&p->tx_list);
	skb_queue_purge(&p->rx_list);

	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);

	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);

	return 0;
}

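/*
 * ndo_start_xmit handler: map the skb, place its descriptor in the TX
 * ring and ring the MIX_ORING2 doorbell. Returns NETDEV_TX_BUSY when
 * the ring is full.
 */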
static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union mgmt_port_ring_entry re;
	unsigned long flags;
	int rv = NETDEV_TX_BUSY;

	re.d64 = 0;
	re.s.len = skb->len;
	re.s.addr = dma_map_single(p->dev, skb->data,
				   skb->len,
				   DMA_TO_DEVICE);

	spin_lock_irqsave(&p->tx_list.lock, flags);

	if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		netif_stop_queue(netdev);
		spin_lock_irqsave(&p->tx_list.lock, flags);
	}

	if (unlikely(p->tx_current_fill >=
		     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);
		goto out;
	}

	__skb_queue_tail(&p->tx_list, skb);

	/* Put it in the ring. */
	p->tx_ring[p->tx_next] = re.d64;
	p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
	p->tx_current_fill++;

	spin_unlock_irqrestore(&p->tx_list.lock, flags);

	dma_sync_single_for_device(p->dev, p->tx_ring_handle,
				   ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
				   DMA_BIDIRECTIONAL);

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;

	/* Ring the bell. */
	cvmx_write_csr(p->mix + MIX_ORING2, 1);

	rv = NETDEV_TX_OK;
out:
	octeon_mgmt_update_tx_stats(netdev);
	return rv;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void octeon_mgmt_poll_controller(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	octeon_mgmt_receive_packets(p, 16);
	octeon_mgmt_update_rx_stats(netdev);
}
#endif

static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
				    struct ethtool_drvinfo *info)
{
	strncpy(info->driver, DRV_NAME, sizeof(info->driver));
	strncpy(info->version, DRV_VERSION, sizeof(info->version));
	strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strncpy(info->bus_info, "N/A", sizeof(info->bus_info));
	info->n_stats = 0;
	info->testinfo_len = 0;
	info->regdump_len = 0;
	info->eedump_len = 0;
}

static int octeon_mgmt_get_settings(struct net_device *netdev,
				    struct ethtool_cmd *cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (p->phydev)
		return phy_ethtool_gset(p->phydev, cmd);

	return -EINVAL;
}

static int octeon_mgmt_set_settings(struct net_device *netdev,
				    struct ethtool_cmd *cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (p->phydev)
		return phy_ethtool_sset(p->phydev, cmd);

	return -EINVAL;
}

static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
	.get_drvinfo = octeon_mgmt_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_settings = octeon_mgmt_get_settings,
	.set_settings = octeon_mgmt_set_settings
};

static const struct net_device_ops octeon_mgmt_ops = {
	.ndo_open = octeon_mgmt_open,
	.ndo_stop = octeon_mgmt_stop,
	.ndo_start_xmit = octeon_mgmt_xmit,
	.ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
	.ndo_set_mac_address = octeon_mgmt_set_mac_address,
	.ndo_do_ioctl = octeon_mgmt_ioctl,
	.ndo_change_mtu = octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = octeon_mgmt_poll_controller,
#endif
};

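/*
 * Platform probe: read the "cell-index", the MIX and AGL register
 * resources, and the optional MAC address and PHY handle from the
 * device tree, then register the network device.
 */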
static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
{
	struct net_device *netdev;
	struct octeon_mgmt *p;
	const __be32 *data;
	const u8 *mac;
	struct resource *res_mix;
	struct resource *res_agl;
	int len;
	int result;

	netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
	if (netdev == NULL)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, netdev);
	p = netdev_priv(netdev);
	netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
		       OCTEON_MGMT_NAPI_WEIGHT);

	p->netdev = netdev;
	p->dev = &pdev->dev;

	data = of_get_property(pdev->dev.of_node, "cell-index", &len);
	if (data && len == sizeof(*data)) {
		p->port = be32_to_cpup(data);
	} else {
		dev_err(&pdev->dev, "no 'cell-index' property\n");
		result = -ENXIO;
		goto err;
	}

	snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);

	result = platform_get_irq(pdev, 0);
	if (result < 0)
		goto err;

	p->irq = result;

	res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res_mix == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res_agl == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	p->mix_phys = res_mix->start;
	p->mix_size = resource_size(res_mix);
	p->agl_phys = res_agl->start;
	p->agl_size = resource_size(res_agl);


	if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
				     res_mix->name)) {
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_mix->name);
		result = -ENXIO;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
				     res_agl->name)) {
		result = -ENXIO;
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_agl->name);
		goto err;
	}


	p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
	p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);

	spin_lock_init(&p->lock);

	skb_queue_head_init(&p->tx_list);
	skb_queue_head_init(&p->rx_list);
	tasklet_init(&p->tx_clean_tasklet,
		     octeon_mgmt_clean_tx_tasklet, (unsigned long)p);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->netdev_ops = &octeon_mgmt_ops;
	netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;

	mac = of_get_mac_address(pdev->dev.of_node);

	if (mac)
		memcpy(netdev->dev_addr, mac, 6);

	p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	result = register_netdev(netdev);
	if (result)
		goto err;

	dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
	return 0;

err:
	free_netdev(netdev);
	return result;
}

static int __devexit octeon_mgmt_remove(struct platform_device *pdev)
{
	struct net_device *netdev = dev_get_drvdata(&pdev->dev);

	unregister_netdev(netdev);
	free_netdev(netdev);
	return 0;
}

static struct of_device_id octeon_mgmt_match[] = {
	{
		.compatible = "cavium,octeon-5750-mix",
	},
	{},
};
MODULE_DEVICE_TABLE(of, octeon_mgmt_match);

static struct platform_driver octeon_mgmt_driver = {
	.driver = {
		.name = "octeon_mgmt",
		.owner = THIS_MODULE,
		.of_match_table = octeon_mgmt_match,
	},
	.probe = octeon_mgmt_probe,
	.remove = __devexit_p(octeon_mgmt_remove),
};

extern void octeon_mdiobus_force_mod_depencency(void);

static int __init octeon_mgmt_mod_init(void)
{
	/* Force our mdiobus driver module to be loaded first. */
	octeon_mdiobus_force_mod_depencency();
	return platform_driver_register(&octeon_mgmt_driver);
}

static void __exit octeon_mgmt_mod_exit(void)
{
	platform_driver_unregister(&octeon_mgmt_driver);
}

module_init(octeon_mgmt_mod_init);
module_exit(octeon_mgmt_mod_exit);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);