/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009 Cavium Networks
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/capability.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/if_vlan.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/io.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-mixx-defs.h>
#include <asm/octeon/cvmx-agl-defs.h>

#define DRV_NAME "octeon_mgmt"
#define DRV_VERSION "2.0"
#define DRV_DESCRIPTION \
	"Cavium Networks Octeon MII (management) port Network Driver"

#define OCTEON_MGMT_NAPI_WEIGHT 16

/*
 * Ring sizes that are powers of two allow for more efficient modulo
 * operations.
 */
#define OCTEON_MGMT_RX_RING_SIZE 512
#define OCTEON_MGMT_TX_RING_SIZE 128

/* Allow 8 bytes for vlan and FCS. */
#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)

union mgmt_port_ring_entry {
	u64 d64;
	struct {
		u64 reserved_62_63:2;
		/* Length of the buffer/packet in bytes */
		u64 len:14;
		/* For TX, signals that the packet should be timestamped */
		u64 tstamp:1;
		/* The RX error code */
		u64 code:7;
#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
		/* Physical address of the buffer */
		u64 addr:40;
	} s;
};

#define MIX_ORING1 0x0
#define MIX_ORING2 0x8
#define MIX_IRING1 0x10
#define MIX_IRING2 0x18
#define MIX_CTL 0x20
#define MIX_IRHWM 0x28
#define MIX_IRCNT 0x30
#define MIX_ORHWM 0x38
#define MIX_ORCNT 0x40
#define MIX_ISR 0x48
#define MIX_INTENA 0x50
#define MIX_REMCNT 0x58
#define MIX_BIST 0x78

#define AGL_GMX_PRT_CFG 0x10
#define AGL_GMX_RX_FRM_CTL 0x18
#define AGL_GMX_RX_FRM_MAX 0x30
#define AGL_GMX_RX_JABBER 0x38
#define AGL_GMX_RX_STATS_CTL 0x50

#define AGL_GMX_RX_STATS_PKTS_DRP 0xb0
#define AGL_GMX_RX_STATS_OCTS_DRP 0xb8
#define AGL_GMX_RX_STATS_PKTS_BAD 0xc0

#define AGL_GMX_RX_ADR_CTL 0x100
#define AGL_GMX_RX_ADR_CAM_EN 0x108
#define AGL_GMX_RX_ADR_CAM0 0x180
#define AGL_GMX_RX_ADR_CAM1 0x188
#define AGL_GMX_RX_ADR_CAM2 0x190
#define AGL_GMX_RX_ADR_CAM3 0x198
#define AGL_GMX_RX_ADR_CAM4 0x1a0
#define AGL_GMX_RX_ADR_CAM5 0x1a8

#define AGL_GMX_TX_STATS_CTL 0x268
#define AGL_GMX_TX_CTL 0x270
#define AGL_GMX_TX_STAT0 0x280
#define AGL_GMX_TX_STAT1 0x288
#define AGL_GMX_TX_STAT2 0x290
#define AGL_GMX_TX_STAT3 0x298
#define AGL_GMX_TX_STAT4 0x2a0
#define AGL_GMX_TX_STAT5 0x2a8
#define AGL_GMX_TX_STAT6 0x2b0
#define AGL_GMX_TX_STAT7 0x2b8
#define AGL_GMX_TX_STAT8 0x2c0
#define AGL_GMX_TX_STAT9 0x2c8

struct octeon_mgmt {
	struct net_device *netdev;
	u64 mix;
	u64 agl;
	int port;
	int irq;
	u64 *tx_ring;
	dma_addr_t tx_ring_handle;
	unsigned int tx_next;
	unsigned int tx_next_clean;
	unsigned int tx_current_fill;
	/* The tx_list lock also protects the ring related variables */
	struct sk_buff_head tx_list;

	/* RX variables only touched in napi_poll. No locking necessary. */
	u64 *rx_ring;
	dma_addr_t rx_ring_handle;
	unsigned int rx_next;
	unsigned int rx_next_fill;
	unsigned int rx_current_fill;
	struct sk_buff_head rx_list;

	spinlock_t lock;
	unsigned int last_duplex;
	unsigned int last_link;
	struct device *dev;
	struct napi_struct napi;
	struct tasklet_struct tx_clean_tasklet;
	struct phy_device *phydev;
	struct device_node *phy_np;
	resource_size_t mix_phys;
	resource_size_t mix_size;
	resource_size_t agl_phys;
	resource_size_t agl_size;
};

static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
{
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
	mix_intena.s.ithena = enable ? 1 : 0;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
{
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
	mix_intena.s.othena = enable ? 1 : 0;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 1);
}

static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 0);
}

static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 1);
}

static inline void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 0);
}

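/* Limit ring occupancy to ring_size - 8 so the rings never fill completely. */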
static unsigned int ring_max_fill(unsigned int ring_size)
{
	return ring_size - 8;
}

static unsigned int ring_size_to_bytes(unsigned int ring_size)
{
	return ring_size * sizeof(union mgmt_port_ring_entry);
}

static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
		unsigned int size;
		union mgmt_port_ring_entry re;
		struct sk_buff *skb;

		/* CN56XX pass 1 needs 8 bytes of padding. */
		size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;

		skb = netdev_alloc_skb(netdev, size);
		if (!skb)
			break;
		skb_reserve(skb, NET_IP_ALIGN);
		__skb_queue_tail(&p->rx_list, skb);

		re.d64 = 0;
		re.s.len = size;
		re.s.addr = dma_map_single(p->dev, skb->data,
					   size,
					   DMA_FROM_DEVICE);

		/* Put it in the ring. */
		p->rx_ring[p->rx_next_fill] = re.d64;
		dma_sync_single_for_device(p->dev, p->rx_ring_handle,
					   ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
					   DMA_BIDIRECTIONAL);
		p->rx_next_fill =
			(p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
		p->rx_current_fill++;
		/* Ring the bell. */
		cvmx_write_csr(p->mix + MIX_IRING2, 1);
	}
}

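/*
 * Reclaim transmitted buffers: for each packet the hardware reports as
 * sent via MIX_ORCNT, unmap the buffer, free the skb and advance the
 * clean index, then wake the queue if it was stopped.
 */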
static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
	union cvmx_mixx_orcnt mix_orcnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	int cleaned = 0;
	unsigned long flags;

	mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	while (mix_orcnt.s.orcnt) {
		spin_lock_irqsave(&p->tx_list.lock, flags);

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);

		if (mix_orcnt.s.orcnt == 0) {
			spin_unlock_irqrestore(&p->tx_list.lock, flags);
			break;
		}

		dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
					ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
					DMA_BIDIRECTIONAL);

		re.d64 = p->tx_ring[p->tx_next_clean];
		p->tx_next_clean =
			(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
		skb = __skb_dequeue(&p->tx_list);

		mix_orcnt.u64 = 0;
		mix_orcnt.s.orcnt = 1;

		/* Acknowledge to hardware that we have the buffer. */
		cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
		p->tx_current_fill--;

		spin_unlock_irqrestore(&p->tx_list.lock, flags);

		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
		cleaned++;

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	}

	if (cleaned && netif_queue_stopped(p->netdev))
		netif_wake_queue(p->netdev);
}

static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
{
	struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
	octeon_mgmt_clean_tx_buffers(p);
	octeon_mgmt_enable_tx_irq(p);
}

static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	unsigned long flags;
	u64 drop, bad;

	/* These reads also clear the count registers. */
	drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
	bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);

	if (drop || bad) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.rx_errors += bad;
		netdev->stats.rx_dropped += drop;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	unsigned long flags;

	union cvmx_agl_gmx_txx_stat0 s0;
	union cvmx_agl_gmx_txx_stat1 s1;

	/* These reads also clear the count registers. */
	s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0);
	s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1);

	if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
		netdev->stats.collisions += s1.s.scol + s1.s.mcol;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

/*
 * Dequeue a receive skb and its corresponding ring entry. The ring
 * entry is returned, *pskb is updated to point to the skb.
 */
static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
					 struct sk_buff **pskb)
{
	union mgmt_port_ring_entry re;

	dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
				ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
				DMA_BIDIRECTIONAL);

	re.d64 = p->rx_ring[p->rx_next];
	p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
	p->rx_current_fill--;
	*pskb = __skb_dequeue(&p->rx_list);

	dma_unmap_single(p->dev, re.s.addr,
			 ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
			 DMA_FROM_DEVICE);

	return re.d64;
}


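/*
 * Process one entry from the RX ring. Returns 0 if a packet was handed
 * to the stack, nonzero otherwise.
 */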
static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
{
	struct net_device *netdev = p->netdev;
	union cvmx_mixx_ircnt mix_ircnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	struct sk_buff *skb2;
	struct sk_buff *skb_new;
	union mgmt_port_ring_entry re2;
	int rc = 1;


	re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
	if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
		/* A good packet, send it up. */
		skb_put(skb, re.s.len);
good:
		skb->protocol = eth_type_trans(skb, netdev);
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);
		rc = 0;
	} else if (re.s.code == RING_ENTRY_CODE_MORE) {
		/*
		 * Packet split across skbs. This can happen if we
		 * increase the MTU. Buffers that are already in the
		 * rx ring can then end up being too small. As the rx
		 * ring is refilled, buffers sized for the new MTU
		 * will be used and we should go back to the normal
		 * non-split case.
		 */
		skb_put(skb, re.s.len);
		do {
			re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
			if (re2.s.code != RING_ENTRY_CODE_MORE
			    && re2.s.code != RING_ENTRY_CODE_DONE)
				goto split_error;
			skb_put(skb2, re2.s.len);
			skb_new = skb_copy_expand(skb, 0, skb2->len,
						  GFP_ATOMIC);
			if (!skb_new)
				goto split_error;
			if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
					  skb2->len))
				goto split_error;
			skb_put(skb_new, skb2->len);
			dev_kfree_skb_any(skb);
			dev_kfree_skb_any(skb2);
			skb = skb_new;
		} while (re2.s.code == RING_ENTRY_CODE_MORE);
		goto good;
	} else {
		/* Some other error, discard it. */
		dev_kfree_skb_any(skb);
		/*
		 * Error statistics are accumulated in
		 * octeon_mgmt_update_rx_stats.
		 */
	}
	goto done;
split_error:
	/* Discard the whole mess. */
	dev_kfree_skb_any(skb);
	dev_kfree_skb_any(skb2);
	while (re2.s.code == RING_ENTRY_CODE_MORE) {
		re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
		dev_kfree_skb_any(skb2);
	}
	netdev->stats.rx_errors++;

done:
	/* Tell the hardware we processed a packet. */
	mix_ircnt.u64 = 0;
	mix_ircnt.s.ircnt = 1;
	cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);
	return rc;
}

static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
{
	unsigned int work_done = 0;
	union cvmx_mixx_ircnt mix_ircnt;
	int rc;

	mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
	while (work_done < budget && mix_ircnt.s.ircnt) {

		rc = octeon_mgmt_receive_one(p);
		if (!rc)
			work_done++;

		/* Check for more packets. */
		mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
	}

	octeon_mgmt_rx_fill_ring(p->netdev);

	return work_done;
}

static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
	struct net_device *netdev = p->netdev;
	unsigned int work_done = 0;

	work_done = octeon_mgmt_receive_packets(p, budget);

	if (work_done < budget) {
		/* We stopped because no more packets were available. */
		napi_complete(napi);
		octeon_mgmt_enable_rx_irq(p);
	}
	octeon_mgmt_update_rx_stats(netdev);

	return work_done;
}

/* Reset the hardware to clean state. */
static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
{
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_mixx_bist mix_bist;
	union cvmx_agl_gmx_bist agl_gmx_bist;

	mix_ctl.u64 = 0;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	do {
		mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
	} while (mix_ctl.s.busy);
	mix_ctl.s.reset = 1;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	cvmx_read_csr(p->mix + MIX_CTL);
	cvmx_wait(64);

	mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
	if (mix_bist.u64)
		dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
			 (unsigned long long)mix_bist.u64);

	agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
	if (agl_gmx_bist.u64)
		dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
			 (unsigned long long)agl_gmx_bist.u64);
}

struct octeon_mgmt_cam_state {
	u64 cam[6];
	u64 cam_mask;
	int cam_index;
};

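/*
 * Each of the six CAM registers holds one byte of every programmed
 * address: byte i of an address is placed in byte slot cam_index of
 * cam[i], and the corresponding bit is set in cam_mask.
 */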
static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
				      unsigned char *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
	cs->cam_mask |= (1ULL << cs->cam_index);
	cs->cam_index++;
}

static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
	union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
	unsigned long flags;
	unsigned int prev_packet_enable;
	unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
	unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
	struct octeon_mgmt_cam_state cam_state;
	struct netdev_hw_addr *ha;
	int available_cam_entries;

	memset(&cam_state, 0, sizeof(cam_state));

	if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
		cam_mode = 0;
		available_cam_entries = 8;
	} else {
		/*
		 * One CAM entry for the primary address, leaves seven
		 * for the secondary addresses.
		 */
		available_cam_entries = 7 - netdev->uc.count;
	}

	if (netdev->flags & IFF_MULTICAST) {
		if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
		    netdev_mc_count(netdev) > available_cam_entries)
			multicast_mode = 2; /* 2 - Accept all multicast. */
		else
			multicast_mode = 0; /* 0 - Use CAM. */
	}

	if (cam_mode == 1) {
		/* Add primary address. */
		octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
		netdev_for_each_uc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}
	if (multicast_mode == 0) {
		netdev_for_each_mc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}

	spin_lock_irqsave(&p->lock, flags);

	/* Disable packet I/O. */
	agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prev_packet_enable = agl_gmx_prtx.s.en;
	agl_gmx_prtx.s.en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	adr_ctl.u64 = 0;
	adr_ctl.s.cam_mode = cam_mode;
	adr_ctl.s.mcst = multicast_mode;
	adr_ctl.s.bcst = 1; /* Allow broadcast */

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64);

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask);

	/* Restore packet I/O. */
	agl_gmx_prtx.s.en = prev_packet_enable;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	spin_unlock_irqrestore(&p->lock, flags);
}

static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
{
	struct sockaddr *sa = addr;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);

	octeon_mgmt_set_rx_filtering(netdev);

	return 0;
}

static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;

	/*
	 * Limit the MTU to make sure the ethernet packets are between
	 * 64 bytes and 16383 bytes.
	 */
	if (size_without_fcs < 64 || size_without_fcs > 16383) {
		dev_warn(p->dev, "MTU must be between %d and %d.\n",
			 64 - OCTEON_MGMT_RX_HEADROOM,
			 16383 - OCTEON_MGMT_RX_HEADROOM);
		return -EINVAL;
	}

	netdev->mtu = new_mtu;

	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs);
	cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
		       (size_without_fcs + 7) & 0xfff8);

	return 0;
}

static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_isr mixx_isr;

	mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
	cvmx_read_csr(p->mix + MIX_ISR);

	if (mixx_isr.s.irthresh) {
		octeon_mgmt_disable_rx_irq(p);
		napi_schedule(&p->napi);
	}
	if (mixx_isr.s.orthresh) {
		octeon_mgmt_disable_tx_irq(p);
		tasklet_schedule(&p->tx_clean_tasklet);
	}

	return IRQ_HANDLED;
}

static int octeon_mgmt_ioctl(struct net_device *netdev,
			     struct ifreq *rq, int cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EINVAL;

	if (!p->phydev)
		return -EINVAL;

	return phy_mii_ioctl(p->phydev, rq, cmd);
}

static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
	unsigned long flags;
	int link_changed = 0;

	spin_lock_irqsave(&p->lock, flags);
	if (p->phydev->link) {
		if (!p->last_link)
			link_changed = 1;
		if (p->last_duplex != p->phydev->duplex) {
			p->last_duplex = p->phydev->duplex;
			prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
			prtx_cfg.s.duplex = p->phydev->duplex;
			cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
		}
	} else {
		if (p->last_link)
			link_changed = -1;
	}
	p->last_link = p->phydev->link;
	spin_unlock_irqrestore(&p->lock, flags);

	if (link_changed != 0) {
		if (link_changed > 0) {
			netif_carrier_on(netdev);
			pr_info("%s: Link is up - %d/%s\n", netdev->name,
				p->phydev->speed,
				DUPLEX_FULL == p->phydev->duplex ?
					"Full" : "Half");
		} else {
			netif_carrier_off(netdev);
			pr_info("%s: Link is down\n", netdev->name);
		}
	}
}

static int octeon_mgmt_init_phy(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (octeon_is_simulation() || p->phy_np == NULL) {
		/* No PHYs in the simulator. */
		netif_carrier_on(netdev);
		return 0;
	}

	p->phydev = of_phy_connect(netdev, p->phy_np,
				   octeon_mgmt_adjust_link, 0,
				   PHY_INTERFACE_MODE_MII);

	if (!p->phydev)
		return -1;

	phy_start_aneg(p->phydev);

	return 0;
}

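/*
 * Bring the interface up: allocate and map the TX/RX descriptor rings,
 * take the MIX/AGL blocks out of reset, program the ring bases,
 * watermarks and frame checks, request the IRQ and start the PHY.
 */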
static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Allocate ring buffers. */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->tx_ring)
		return -ENOMEM;
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;


	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->rx_ring)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);

	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);

	/* Bring it out of reset if needed. */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
		do {
			mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
		} while (mix_ctl.s.reset);
	}

	agl_gmx_inf_mode.u64 = 0;
	agl_gmx_inf_mode.s.en = 1;
	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

	oring1.u64 = 0;
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);

	/* Disable packet I/O. */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/*
	 * Enable the port HW. Packets are not allowed until
	 * cvmx_mgmt_port_enable() is called.
	 */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */
	mix_ctl.s.en = 1; /* Enable the port */
	mix_ctl.s.nbtarb = 0; /* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);

	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/*
		 * Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
		if (port) {
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
		} else {
			drv_ctl.s.byp_en = 1;
			drv_ctl.s.nctl = 6;
			drv_ctl.s.pctl = 6;
		}
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
	}

	octeon_mgmt_rx_fill_ring(netdev);

	/* Clear statistics. */
	/* Clear on read. */
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);

	cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));

	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
			netdev)) {
		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
		goto err_noirq;
	}

	/* Interrupt every single RX packet */
	mix_irhwm.u64 = 0;
	mix_irhwm.s.irhwm = 0;
	cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);

	/* Interrupt when we have 1 or more packets to clean. */
	mix_orhwm.u64 = 0;
	mix_orhwm.s.orhwm = 1;
	cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);

	/* Enable receive and transmit interrupts */
	mix_intena.u64 = 0;
	mix_intena.s.ithena = 1;
	mix_intena.s.othena = 1;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);


	/* Enable packet I/O. */

	rxx_frm_ctl.u64 = 0;
	rxx_frm_ctl.s.pre_align = 1;
	/*
	 * When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
	rxx_frm_ctl.s.pad_len = 1;
	/* When set, disables the length check for VLAN pkts */
	rxx_frm_ctl.s.vlan_len = 1;
	/* When set, PREAMBLE checking is less strict */
	rxx_frm_ctl.s.pre_free = 1;
	/* Control Pause Frames can match station SMAC */
	rxx_frm_ctl.s.ctl_smac = 0;
	/* Control Pause Frames can match globally assign Multicast address */
	rxx_frm_ctl.s.ctl_mcst = 1;
	/* Forward pause information to TX block */
	rxx_frm_ctl.s.ctl_bck = 1;
	/* Drop Control Pause Frames */
	rxx_frm_ctl.s.ctl_drp = 1;
	/* Strip off the preamble */
	rxx_frm_ctl.s.pre_strp = 1;
	/*
	 * This port is configured to send PREAMBLE+SFD to begin every
	 * frame. GMX checks that the PREAMBLE is sent correctly.
	 */
	rxx_frm_ctl.s.pre_chk = 1;
	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);

	/* Enable the AGL block */
	agl_gmx_inf_mode.u64 = 0;
	agl_gmx_inf_mode.s.en = 1;
	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

	/* Configure the port duplex and enables */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.tx_en = 1;
	prtx_cfg.s.rx_en = 1;
	prtx_cfg.s.en = 1;
	p->last_duplex = 1;
	prtx_cfg.s.duplex = p->last_duplex;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	p->last_link = 0;
	netif_carrier_off(netdev);

	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY.\n");
		goto err_noirq;
	}

	netif_wake_queue(netdev);
	napi_enable(&p->napi);

	return 0;
err_noirq:
	octeon_mgmt_reset_hw(p);
	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);
err_nomem:
	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);
	return -ENOMEM;
}

static int octeon_mgmt_stop(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	napi_disable(&p->napi);
	netif_stop_queue(netdev);

	if (p->phydev)
		phy_disconnect(p->phydev);

	netif_carrier_off(netdev);

	octeon_mgmt_reset_hw(p);

	free_irq(p->irq, netdev);

	/* dma_unmap is a nop on Octeon, so just free everything. */
	skb_queue_purge(&p->tx_list);
	skb_queue_purge(&p->rx_list);

	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);

	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);

	return 0;
}

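/*
 * Queue one skb for transmission: map it, place its descriptor in the
 * TX ring and ring the MIX_ORING2 doorbell. The queue is stopped when
 * the ring is nearly full; the TX-clean path wakes it again.
 */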
static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union mgmt_port_ring_entry re;
	unsigned long flags;
	int rv = NETDEV_TX_BUSY;

	re.d64 = 0;
	re.s.len = skb->len;
	re.s.addr = dma_map_single(p->dev, skb->data,
				   skb->len,
				   DMA_TO_DEVICE);

	spin_lock_irqsave(&p->tx_list.lock, flags);

	if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		netif_stop_queue(netdev);
		spin_lock_irqsave(&p->tx_list.lock, flags);
	}

	if (unlikely(p->tx_current_fill >=
		     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);
		goto out;
	}

	__skb_queue_tail(&p->tx_list, skb);

	/* Put it in the ring. */
	p->tx_ring[p->tx_next] = re.d64;
	p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
	p->tx_current_fill++;

	spin_unlock_irqrestore(&p->tx_list.lock, flags);

	dma_sync_single_for_device(p->dev, p->tx_ring_handle,
				   ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
				   DMA_BIDIRECTIONAL);

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;

	/* Ring the bell. */
	cvmx_write_csr(p->mix + MIX_ORING2, 1);

	rv = NETDEV_TX_OK;
out:
	octeon_mgmt_update_tx_stats(netdev);
	return rv;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void octeon_mgmt_poll_controller(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	octeon_mgmt_receive_packets(p, 16);
	octeon_mgmt_update_rx_stats(netdev);
}
#endif

static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
				    struct ethtool_drvinfo *info)
{
	strncpy(info->driver, DRV_NAME, sizeof(info->driver));
	strncpy(info->version, DRV_VERSION, sizeof(info->version));
	strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strncpy(info->bus_info, "N/A", sizeof(info->bus_info));
	info->n_stats = 0;
	info->testinfo_len = 0;
	info->regdump_len = 0;
	info->eedump_len = 0;
}

static int octeon_mgmt_get_settings(struct net_device *netdev,
				    struct ethtool_cmd *cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (p->phydev)
		return phy_ethtool_gset(p->phydev, cmd);

	return -EINVAL;
}

static int octeon_mgmt_set_settings(struct net_device *netdev,
				    struct ethtool_cmd *cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (p->phydev)
		return phy_ethtool_sset(p->phydev, cmd);

	return -EINVAL;
}

static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
	.get_drvinfo = octeon_mgmt_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_settings = octeon_mgmt_get_settings,
	.set_settings = octeon_mgmt_set_settings
};

static const struct net_device_ops octeon_mgmt_ops = {
	.ndo_open = octeon_mgmt_open,
	.ndo_stop = octeon_mgmt_stop,
	.ndo_start_xmit = octeon_mgmt_xmit,
	.ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
	.ndo_set_mac_address = octeon_mgmt_set_mac_address,
	.ndo_do_ioctl = octeon_mgmt_ioctl,
	.ndo_change_mtu = octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = octeon_mgmt_poll_controller,
#endif
};

static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
{
	struct net_device *netdev;
	struct octeon_mgmt *p;
	const __be32 *data;
	const u8 *mac;
	struct resource *res_mix;
	struct resource *res_agl;
	int len;
	int result;

	netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
	if (netdev == NULL)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, netdev);
	p = netdev_priv(netdev);
	netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
		       OCTEON_MGMT_NAPI_WEIGHT);

	p->netdev = netdev;
	p->dev = &pdev->dev;

	data = of_get_property(pdev->dev.of_node, "cell-index", &len);
	if (data && len == sizeof(*data)) {
		p->port = be32_to_cpup(data);
	} else {
		dev_err(&pdev->dev, "no 'cell-index' property\n");
		result = -ENXIO;
		goto err;
	}

	snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);

	result = platform_get_irq(pdev, 0);
	if (result < 0)
		goto err;

	p->irq = result;

	res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res_mix == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res_agl == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	p->mix_phys = res_mix->start;
	p->mix_size = resource_size(res_mix);
	p->agl_phys = res_agl->start;
	p->agl_size = resource_size(res_agl);


	if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
				     res_mix->name)) {
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_mix->name);
		result = -ENXIO;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
				     res_agl->name)) {
		result = -ENXIO;
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_agl->name);
		goto err;
	}


	p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
	p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);

	spin_lock_init(&p->lock);

	skb_queue_head_init(&p->tx_list);
	skb_queue_head_init(&p->rx_list);
	tasklet_init(&p->tx_clean_tasklet,
		     octeon_mgmt_clean_tx_tasklet, (unsigned long)p);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->netdev_ops = &octeon_mgmt_ops;
	netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;

	mac = of_get_mac_address(pdev->dev.of_node);

	if (mac)
		memcpy(netdev->dev_addr, mac, 6);

	p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	result = register_netdev(netdev);
	if (result)
		goto err;

	dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
	return 0;

err:
	free_netdev(netdev);
	return result;
}

static int __devexit octeon_mgmt_remove(struct platform_device *pdev)
{
	struct net_device *netdev = dev_get_drvdata(&pdev->dev);

	unregister_netdev(netdev);
	free_netdev(netdev);
	return 0;
}

static struct of_device_id octeon_mgmt_match[] = {
	{
		.compatible = "cavium,octeon-5750-mix",
	},
	{},
};
MODULE_DEVICE_TABLE(of, octeon_mgmt_match);

static struct platform_driver octeon_mgmt_driver = {
	.driver = {
		.name = "octeon_mgmt",
		.owner = THIS_MODULE,
		.of_match_table = octeon_mgmt_match,
	},
	.probe = octeon_mgmt_probe,
	.remove = __devexit_p(octeon_mgmt_remove),
};

extern void octeon_mdiobus_force_mod_depencency(void);

static int __init octeon_mgmt_mod_init(void)
{
	/* Force our mdiobus driver module to be loaded first. */
	octeon_mdiobus_force_mod_depencency();
	return platform_driver_register(&octeon_mgmt_driver);
}

static void __exit octeon_mgmt_mod_exit(void)
{
	platform_driver_unregister(&octeon_mgmt_driver);
}

module_init(octeon_mgmt_mod_init);
module_exit(octeon_mgmt_mod_exit);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);