/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009 Cavium Networks
 */

#include <linux/capability.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/spinlock.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-mixx-defs.h>
#include <asm/octeon/cvmx-agl-defs.h>

#define DRV_NAME "octeon_mgmt"
#define DRV_VERSION "2.0"
#define DRV_DESCRIPTION \
	"Cavium Networks Octeon MII (management) port Network Driver"

#define OCTEON_MGMT_NAPI_WEIGHT 16

/*
 * Ring sizes that are powers of two allow for more efficient modulo
 * operations.
 */
#define OCTEON_MGMT_RX_RING_SIZE 512
#define OCTEON_MGMT_TX_RING_SIZE 128

/* Allow 8 bytes for vlan and FCS. */
#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)

union mgmt_port_ring_entry {
	u64 d64;
	struct {
		u64 reserved_62_63:2;
		/* Length of the buffer/packet in bytes */
		u64 len:14;
		/* For TX, signals that the packet should be timestamped */
		u64 tstamp:1;
		/* The RX error code */
		u64 code:7;
#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
		/* Physical address of the buffer */
		u64 addr:40;
	} s;
};

struct octeon_mgmt {
	struct net_device *netdev;
	int port;
	int irq;
	u64 *tx_ring;
	dma_addr_t tx_ring_handle;
	unsigned int tx_next;
	unsigned int tx_next_clean;
	unsigned int tx_current_fill;
	/* The tx_list lock also protects the ring related variables */
	struct sk_buff_head tx_list;

	/* RX variables only touched in napi_poll. No locking necessary. */
	u64 *rx_ring;
	dma_addr_t rx_ring_handle;
	unsigned int rx_next;
	unsigned int rx_next_fill;
	unsigned int rx_current_fill;
	struct sk_buff_head rx_list;

	spinlock_t lock;
	unsigned int last_duplex;
	unsigned int last_link;
	struct device *dev;
	struct napi_struct napi;
	struct tasklet_struct tx_clean_tasklet;
	struct phy_device *phydev;
};

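/*
 * Enable or disable the RX packet threshold interrupt for the port.
 * The MIX interrupt enable register is shared with the TX side, so it
 * is updated with a read-modify-write under p->lock.
 */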
static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
{
	int port = p->port;
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
	mix_intena.s.ithena = enable ? 1 : 0;
	cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
{
	int port = p->port;
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
	mix_intena.s.othena = enable ? 1 : 0;
	cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 1);
}

static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 0);
}

static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 1);
}

static inline void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 0);
}

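/* Don't fill the rings completely; keep a few entries of slack. */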
static unsigned int ring_max_fill(unsigned int ring_size)
{
	return ring_size - 8;
}

static unsigned int ring_size_to_bytes(unsigned int ring_size)
{
	return ring_size * sizeof(union mgmt_port_ring_entry);
}

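/*
 * Refill the RX descriptor ring: allocate skbs, map them for DMA and
 * post one ring entry per buffer, ringing the IRING2 doorbell for each
 * entry handed to the hardware.
 */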
static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;

	while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
		unsigned int size;
		union mgmt_port_ring_entry re;
		struct sk_buff *skb;

		/* CN56XX pass 1 needs 8 bytes of padding. */
		size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;

		skb = netdev_alloc_skb(netdev, size);
		if (!skb)
			break;
		skb_reserve(skb, NET_IP_ALIGN);
		__skb_queue_tail(&p->rx_list, skb);

		re.d64 = 0;
		re.s.len = size;
		re.s.addr = dma_map_single(p->dev, skb->data,
					   size,
					   DMA_FROM_DEVICE);

		/* Put it in the ring. */
		p->rx_ring[p->rx_next_fill] = re.d64;
		dma_sync_single_for_device(p->dev, p->rx_ring_handle,
					   ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
					   DMA_BIDIRECTIONAL);
		p->rx_next_fill =
			(p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
		p->rx_current_fill++;
		/* Ring the bell. */
		cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
	}
}

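/*
 * Reclaim completed TX descriptors. For each packet the hardware
 * reports as sent (ORCNT), unmap the buffer, free the skb and
 * acknowledge one packet back to the hardware; wake the queue if it
 * was stopped.
 */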
static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
	int port = p->port;
	union cvmx_mixx_orcnt mix_orcnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	int cleaned = 0;
	unsigned long flags;

	mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
	while (mix_orcnt.s.orcnt) {
		dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
					ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
					DMA_BIDIRECTIONAL);

		spin_lock_irqsave(&p->tx_list.lock, flags);

		re.d64 = p->tx_ring[p->tx_next_clean];
		p->tx_next_clean =
			(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
		skb = __skb_dequeue(&p->tx_list);

		mix_orcnt.u64 = 0;
		mix_orcnt.s.orcnt = 1;

		/* Acknowledge to hardware that we have the buffer. */
		cvmx_write_csr(CVMX_MIXX_ORCNT(port), mix_orcnt.u64);
		p->tx_current_fill--;

		spin_unlock_irqrestore(&p->tx_list.lock, flags);

		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
		cleaned++;

		mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
	}

	if (cleaned && netif_queue_stopped(p->netdev))
		netif_wake_queue(p->netdev);
}

static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
{
	struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
	octeon_mgmt_clean_tx_buffers(p);
	octeon_mgmt_enable_tx_irq(p);
}

static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	unsigned long flags;
	u64 drop, bad;

	/* These reads also clear the count registers. */
	drop = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port));
	bad = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port));

	if (drop || bad) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.rx_errors += bad;
		netdev->stats.rx_dropped += drop;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	unsigned long flags;

	union cvmx_agl_gmx_txx_stat0 s0;
	union cvmx_agl_gmx_txx_stat1 s1;

	/* These reads also clear the count registers. */
	s0.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT0(port));
	s1.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT1(port));

	if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
		netdev->stats.collisions += s1.s.scol + s1.s.mcol;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

/*
 * Dequeue a receive skb and its corresponding ring entry. The ring
 * entry is returned, *pskb is updated to point to the skb.
 */
static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
					 struct sk_buff **pskb)
{
	union mgmt_port_ring_entry re;

	dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
				ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
				DMA_BIDIRECTIONAL);

	re.d64 = p->rx_ring[p->rx_next];
	p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
	p->rx_current_fill--;
	*pskb = __skb_dequeue(&p->rx_list);

	dma_unmap_single(p->dev, re.s.addr,
			 ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
			 DMA_FROM_DEVICE);

	return re.d64;
}

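/*
 * Process one RX descriptor. Good packets are handed to the stack;
 * packets split across multiple buffers (RING_ENTRY_CODE_MORE) are
 * first coalesced into a single skb. Returns 0 if a packet was
 * delivered, nonzero otherwise.
 */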
static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
{
	int port = p->port;
	struct net_device *netdev = p->netdev;
	union cvmx_mixx_ircnt mix_ircnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	struct sk_buff *skb2;
	struct sk_buff *skb_new;
	union mgmt_port_ring_entry re2;
	int rc = 1;

	re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
	if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
		/* A good packet, send it up. */
		skb_put(skb, re.s.len);
good:
		skb->protocol = eth_type_trans(skb, netdev);
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);
		rc = 0;
	} else if (re.s.code == RING_ENTRY_CODE_MORE) {
		/*
		 * Packet split across skbs. This can happen if we
		 * increase the MTU. Buffers that are already in the
		 * rx ring can then end up being too small. As the rx
		 * ring is refilled, buffers sized for the new MTU
		 * will be used and we should go back to the normal
		 * non-split case.
		 */
		skb_put(skb, re.s.len);
		do {
			re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
			if (re2.s.code != RING_ENTRY_CODE_MORE
				&& re2.s.code != RING_ENTRY_CODE_DONE)
				goto split_error;
			skb_put(skb2, re2.s.len);
			skb_new = skb_copy_expand(skb, 0, skb2->len,
						  GFP_ATOMIC);
			if (!skb_new)
				goto split_error;
			if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
					  skb2->len))
				goto split_error;
			skb_put(skb_new, skb2->len);
			dev_kfree_skb_any(skb);
			dev_kfree_skb_any(skb2);
			skb = skb_new;
		} while (re2.s.code == RING_ENTRY_CODE_MORE);
		goto good;
	} else {
		/* Some other error, discard it. */
		dev_kfree_skb_any(skb);
		/*
		 * Error statistics are accumulated in
		 * octeon_mgmt_update_rx_stats.
		 */
	}
	goto done;
split_error:
	/* Discard the whole mess. */
	dev_kfree_skb_any(skb);
	dev_kfree_skb_any(skb2);
	while (re2.s.code == RING_ENTRY_CODE_MORE) {
		re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
		dev_kfree_skb_any(skb2);
	}
	netdev->stats.rx_errors++;

done:
	/* Tell the hardware we processed a packet. */
	mix_ircnt.u64 = 0;
	mix_ircnt.s.ircnt = 1;
	cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64);
	return rc;
}

static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
{
	int port = p->port;
	unsigned int work_done = 0;
	union cvmx_mixx_ircnt mix_ircnt;
	int rc;

	mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
	while (work_done < budget && mix_ircnt.s.ircnt) {

		rc = octeon_mgmt_receive_one(p);
		if (!rc)
			work_done++;

		/* Check for more packets. */
		mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
	}

	octeon_mgmt_rx_fill_ring(p->netdev);

	return work_done;
}

static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
	struct net_device *netdev = p->netdev;
	unsigned int work_done = 0;

	work_done = octeon_mgmt_receive_packets(p, budget);

	if (work_done < budget) {
		/* We stopped because no more packets were available. */
		napi_complete(napi);
		octeon_mgmt_enable_rx_irq(p);
	}
	octeon_mgmt_update_rx_stats(netdev);

	return work_done;
}

/* Reset the hardware to clean state. */
static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
{
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_mixx_bist mix_bist;
	union cvmx_agl_gmx_bist agl_gmx_bist;

	mix_ctl.u64 = 0;
	cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
	do {
		mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(p->port));
	} while (mix_ctl.s.busy);
	mix_ctl.s.reset = 1;
	cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
	cvmx_read_csr(CVMX_MIXX_CTL(p->port));
	cvmx_wait(64);

	mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(p->port));
	if (mix_bist.u64)
		dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
			 (unsigned long long)mix_bist.u64);

	agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
	if (agl_gmx_bist.u64)
		dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
			 (unsigned long long)agl_gmx_bist.u64);
}

struct octeon_mgmt_cam_state {
	u64 cam[6];
	u64 cam_mask;
	int cam_index;
};

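/*
 * Add one MAC address to the CAM shadow state. Each of the six CAM
 * registers holds one byte of up to eight addresses: byte i of this
 * address lands at byte position cam_index of cam[i].
 */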
static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
				      unsigned char *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
	cs->cam_mask |= (1ULL << cs->cam_index);
	cs->cam_index++;
}

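/*
 * Program the AGL address filter from the current device state: load
 * the unicast and multicast CAM entries and select promiscuous,
 * CAM-match or accept-all-multicast mode. Packet I/O is briefly
 * disabled while the filter registers are rewritten.
 */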
static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
	union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
	unsigned long flags;
	unsigned int prev_packet_enable;
	unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
	unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
	struct octeon_mgmt_cam_state cam_state;
	struct netdev_hw_addr *ha;
	int available_cam_entries;

	memset(&cam_state, 0, sizeof(cam_state));

	if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
		cam_mode = 0;
		available_cam_entries = 8;
	} else {
		/*
		 * One CAM entry for the primary address, leaves seven
		 * for the secondary addresses.
		 */
		available_cam_entries = 7 - netdev->uc.count;
	}

	if (netdev->flags & IFF_MULTICAST) {
		if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
		    netdev_mc_count(netdev) > available_cam_entries)
			multicast_mode = 2; /* 2 - Accept all multicast. */
		else
			multicast_mode = 0; /* 0 - Use CAM. */
	}

	if (cam_mode == 1) {
		/* Add primary address. */
		octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
		netdev_for_each_uc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}
	if (multicast_mode == 0) {
		netdev_for_each_mc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}

	spin_lock_irqsave(&p->lock, flags);

	/* Disable packet I/O. */
	agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	prev_packet_enable = agl_gmx_prtx.s.en;
	agl_gmx_prtx.s.en = 0;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);

	adr_ctl.u64 = 0;
	adr_ctl.s.cam_mode = cam_mode;
	adr_ctl.s.mcst = multicast_mode;
	adr_ctl.s.bcst = 1;     /* Allow broadcast */

	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), adr_ctl.u64);

	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), cam_state.cam[0]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), cam_state.cam[1]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), cam_state.cam[2]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), cam_state.cam[3]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), cam_state.cam[4]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), cam_state.cam[5]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), cam_state.cam_mask);

	/* Restore packet I/O. */
	agl_gmx_prtx.s.en = prev_packet_enable;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);

	spin_unlock_irqrestore(&p->lock, flags);
}

static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
{
	struct sockaddr *sa = addr;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);

	octeon_mgmt_set_rx_filtering(netdev);

	return 0;
}

static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;

	/*
	 * Limit the MTU to make sure the ethernet packets are between
	 * 64 bytes and 16383 bytes.
	 */
	if (size_without_fcs < 64 || size_without_fcs > 16383) {
		dev_warn(p->dev, "MTU must be between %d and %d.\n",
			 64 - OCTEON_MGMT_RX_HEADROOM,
			 16383 - OCTEON_MGMT_RX_HEADROOM);
		return -EINVAL;
	}

	netdev->mtu = new_mtu;

	cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port),
		       (size_without_fcs + 7) & 0xfff8);

	return 0;
}

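/*
 * MIX interrupt handler: acknowledge the pending sources, then hand RX
 * threshold interrupts to NAPI and TX threshold interrupts to the TX
 * clean tasklet, with the corresponding interrupt disabled until the
 * deferred work has run.
 */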
static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_mixx_isr mixx_isr;

	mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port));

	/* Clear any pending interrupts */
	cvmx_write_csr(CVMX_MIXX_ISR(port),
		       cvmx_read_csr(CVMX_MIXX_ISR(port)));
	cvmx_read_csr(CVMX_MIXX_ISR(port));

	if (mixx_isr.s.irthresh) {
		octeon_mgmt_disable_rx_irq(p);
		napi_schedule(&p->napi);
	}
	if (mixx_isr.s.orthresh) {
		octeon_mgmt_disable_tx_irq(p);
		tasklet_schedule(&p->tx_clean_tasklet);
	}

	return IRQ_HANDLED;
}

static int octeon_mgmt_ioctl(struct net_device *netdev,
			     struct ifreq *rq, int cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EINVAL;

	if (!p->phydev)
		return -EINVAL;

	return phy_mii_ioctl(p->phydev, if_mii(rq), cmd);
}

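/*
 * PHY link-state callback: mirror the negotiated duplex into the AGL
 * port configuration and report carrier transitions.
 */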
static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
	unsigned long flags;
	int link_changed = 0;

	spin_lock_irqsave(&p->lock, flags);
	if (p->phydev->link) {
		if (!p->last_link)
			link_changed = 1;
		if (p->last_duplex != p->phydev->duplex) {
			p->last_duplex = p->phydev->duplex;
			prtx_cfg.u64 =
				cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
			prtx_cfg.s.duplex = p->phydev->duplex;
			cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port),
				       prtx_cfg.u64);
		}
	} else {
		if (p->last_link)
			link_changed = -1;
	}
	p->last_link = p->phydev->link;
	spin_unlock_irqrestore(&p->lock, flags);

	if (link_changed != 0) {
		if (link_changed > 0) {
			netif_carrier_on(netdev);
			pr_info("%s: Link is up - %d/%s\n", netdev->name,
				p->phydev->speed,
				DUPLEX_FULL == p->phydev->duplex ?
					"Full" : "Half");
		} else {
			netif_carrier_off(netdev);
			pr_info("%s: Link is down\n", netdev->name);
		}
	}
}

static int octeon_mgmt_init_phy(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	char phy_id[20];

	if (octeon_is_simulation()) {
		/* No PHYs in the simulator. */
		netif_carrier_on(netdev);
		return 0;
	}

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", p->port);

	p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0,
				PHY_INTERFACE_MODE_MII);

	if (IS_ERR(p->phydev)) {
		p->phydev = NULL;
		return -1;
	}

	phy_start_aneg(p->phydev);

	return 0;
}

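/*
 * ndo_open: allocate and DMA-map the TX/RX descriptor rings, bring the
 * MIX/AGL blocks out of reset, program the ring bases, interrupt
 * thresholds and frame checks, attach the IRQ and the PHY, and start
 * NAPI.
 */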
static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Allocate ring buffers. */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->tx_ring)
		return -ENOMEM;
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;

	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->rx_ring)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);

	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));

	/* Bring it out of reset if needed. */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
		do {
			mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
		} while (mix_ctl.s.reset);
	}

	agl_gmx_inf_mode.u64 = 0;
	agl_gmx_inf_mode.s.en = 1;
	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

	oring1.u64 = 0;
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);

	/* Disable packet I/O. */
	prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	prtx_cfg.s.en = 0;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);

	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/*
	 * Enable the port HW. Packets are not allowed until
	 * cvmx_mgmt_port_enable() is called.
	 */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1;	/* Strip the ending CRC */
	mix_ctl.s.en = 1;		/* Enable the port */
	mix_ctl.s.nbtarb = 0;		/* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
	cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);

	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/*
		 * Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
		if (port) {
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
		} else {
			drv_ctl.s.byp_en = 1;
			drv_ctl.s.nctl = 6;
			drv_ctl.s.pctl = 6;
		}
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
	}

	octeon_mgmt_rx_fill_ring(netdev);

	/* Clear statistics. */
	/* Clear on read. */
	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), 1);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port), 0);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port), 0);

	cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), 1);
	cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0);
	cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT1(port), 0);

	/* Clear any pending interrupts */
	cvmx_write_csr(CVMX_MIXX_ISR(port), cvmx_read_csr(CVMX_MIXX_ISR(port)));

	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
			netdev)) {
		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
		goto err_noirq;
	}

	/* Interrupt every single RX packet */
	mix_irhwm.u64 = 0;
	mix_irhwm.s.irhwm = 0;
	cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);

	/* Interrupt when we have 5 or more packets to clean. */
	mix_orhwm.u64 = 0;
	mix_orhwm.s.orhwm = 5;
	cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);

	/* Enable receive and transmit interrupts */
	mix_intena.u64 = 0;
	mix_intena.s.ithena = 1;
	mix_intena.s.othena = 1;
	cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);

	/* Enable packet I/O. */

	rxx_frm_ctl.u64 = 0;
	rxx_frm_ctl.s.pre_align = 1;
	/*
	 * When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
	rxx_frm_ctl.s.pad_len = 1;
	/* When set, disables the length check for VLAN pkts */
	rxx_frm_ctl.s.vlan_len = 1;
	/* When set, PREAMBLE checking is less strict */
	rxx_frm_ctl.s.pre_free = 1;
	/* Control Pause Frames can match station SMAC */
	rxx_frm_ctl.s.ctl_smac = 0;
	/* Control Pause Frames can match globally assign Multicast address */
	rxx_frm_ctl.s.ctl_mcst = 1;
	/* Forward pause information to TX block */
	rxx_frm_ctl.s.ctl_bck = 1;
	/* Drop Control Pause Frames */
	rxx_frm_ctl.s.ctl_drp = 1;
	/* Strip off the preamble */
	rxx_frm_ctl.s.pre_strp = 1;
	/*
	 * This port is configured to send PREAMBLE+SFD to begin every
	 * frame. GMX checks that the PREAMBLE is sent correctly.
	 */
	rxx_frm_ctl.s.pre_chk = 1;
	cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);

	/* Enable the AGL block */
	agl_gmx_inf_mode.u64 = 0;
	agl_gmx_inf_mode.s.en = 1;
	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

	/* Configure the port duplex and enables */
	prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	prtx_cfg.s.tx_en = 1;
	prtx_cfg.s.rx_en = 1;
	prtx_cfg.s.en = 1;
	p->last_duplex = 1;
	prtx_cfg.s.duplex = p->last_duplex;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);

	p->last_link = 0;
	netif_carrier_off(netdev);

	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY.\n");
		/* The IRQ was requested above; release it before unwinding. */
		free_irq(p->irq, netdev);
		goto err_noirq;
	}

	netif_wake_queue(netdev);
	napi_enable(&p->napi);

	return 0;
err_noirq:
	octeon_mgmt_reset_hw(p);
	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);
err_nomem:
	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);
	return -ENOMEM;
}

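/* ndo_stop: quiesce NAPI, detach the PHY, release the IRQ and free the rings. */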
static int octeon_mgmt_stop(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	napi_disable(&p->napi);
	netif_stop_queue(netdev);

	if (p->phydev)
		phy_disconnect(p->phydev);

	netif_carrier_off(netdev);

	octeon_mgmt_reset_hw(p);

	free_irq(p->irq, netdev);

	/* dma_unmap is a nop on Octeon, so just free everything. */
	skb_queue_purge(&p->tx_list);
	skb_queue_purge(&p->rx_list);

	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);

	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);

	return 0;
}

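/*
 * ndo_start_xmit: map the skb for DMA, place a descriptor in the TX
 * ring and ring the ORING2 doorbell. If the ring is full the queue is
 * stopped and NETDEV_TX_BUSY is returned.
 */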
static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union mgmt_port_ring_entry re;
	unsigned long flags;

	re.d64 = 0;
	re.s.len = skb->len;
	re.s.addr = dma_map_single(p->dev, skb->data,
				   skb->len,
				   DMA_TO_DEVICE);

	spin_lock_irqsave(&p->tx_list.lock, flags);

	if (unlikely(p->tx_current_fill >=
		     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);

		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);

		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	__skb_queue_tail(&p->tx_list, skb);

	/* Put it in the ring. */
	p->tx_ring[p->tx_next] = re.d64;
	p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
	p->tx_current_fill++;

	spin_unlock_irqrestore(&p->tx_list.lock, flags);

	dma_sync_single_for_device(p->dev, p->tx_ring_handle,
				   ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
				   DMA_BIDIRECTIONAL);

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;

	/* Ring the bell. */
	cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);

	netdev->trans_start = jiffies;
	octeon_mgmt_clean_tx_buffers(p);
	octeon_mgmt_update_tx_stats(netdev);
	return NETDEV_TX_OK;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void octeon_mgmt_poll_controller(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	octeon_mgmt_receive_packets(p, 16);
	octeon_mgmt_update_rx_stats(netdev);
}
#endif

static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
				    struct ethtool_drvinfo *info)
{
	strncpy(info->driver, DRV_NAME, sizeof(info->driver));
	strncpy(info->version, DRV_VERSION, sizeof(info->version));
	strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strncpy(info->bus_info, "N/A", sizeof(info->bus_info));
	info->n_stats = 0;
	info->testinfo_len = 0;
	info->regdump_len = 0;
	info->eedump_len = 0;
}

static int octeon_mgmt_get_settings(struct net_device *netdev,
				    struct ethtool_cmd *cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (p->phydev)
		return phy_ethtool_gset(p->phydev, cmd);

	return -EINVAL;
}

static int octeon_mgmt_set_settings(struct net_device *netdev,
				    struct ethtool_cmd *cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (p->phydev)
		return phy_ethtool_sset(p->phydev, cmd);

	return -EINVAL;
}

static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
	.get_drvinfo = octeon_mgmt_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_settings = octeon_mgmt_get_settings,
	.set_settings = octeon_mgmt_set_settings
};

static const struct net_device_ops octeon_mgmt_ops = {
	.ndo_open = octeon_mgmt_open,
	.ndo_stop = octeon_mgmt_stop,
	.ndo_start_xmit = octeon_mgmt_xmit,
	.ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
	.ndo_set_multicast_list = octeon_mgmt_set_rx_filtering,
	.ndo_set_mac_address = octeon_mgmt_set_mac_address,
	.ndo_do_ioctl = octeon_mgmt_ioctl,
	.ndo_change_mtu = octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = octeon_mgmt_poll_controller,
#endif
};

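/*
 * Platform probe: allocate the net_device, set up NAPI, the ops
 * structures, the per-port IRQ and the TX clean tasklet, derive the
 * MAC address from the bootloader-provided base, and register the
 * interface.
 */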
static int __init octeon_mgmt_probe(struct platform_device *pdev)
{
	struct resource *res_irq;
	struct net_device *netdev;
	struct octeon_mgmt *p;
	int i;

	netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
	if (netdev == NULL)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, netdev);
	p = netdev_priv(netdev);
	netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
		       OCTEON_MGMT_NAPI_WEIGHT);

	p->netdev = netdev;
	p->dev = &pdev->dev;

	p->port = pdev->id;
	snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);

	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res_irq)
		goto err;

	p->irq = res_irq->start;
	spin_lock_init(&p->lock);

	skb_queue_head_init(&p->tx_list);
	skb_queue_head_init(&p->rx_list);
	tasklet_init(&p->tx_clean_tasklet,
		     octeon_mgmt_clean_tx_tasklet, (unsigned long)p);

	netdev->netdev_ops = &octeon_mgmt_ops;
	netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;

	/* The mgmt ports get the first N MACs. */
	for (i = 0; i < 6; i++)
		netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i];
	netdev->dev_addr[5] += p->port;

	if (p->port >= octeon_bootinfo->mac_addr_count)
		dev_err(&pdev->dev,
			"Error %s: Using MAC outside of the assigned range: %pM\n",
			netdev->name, netdev->dev_addr);

	if (register_netdev(netdev))
		goto err;

	dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
	return 0;
err:
	free_netdev(netdev);
	return -ENOENT;
}

static int __exit octeon_mgmt_remove(struct platform_device *pdev)
{
	struct net_device *netdev = dev_get_drvdata(&pdev->dev);

	unregister_netdev(netdev);
	free_netdev(netdev);
	return 0;
}

static struct platform_driver octeon_mgmt_driver = {
	.driver = {
		.name = "octeon_mgmt",
		.owner = THIS_MODULE,
	},
	.probe = octeon_mgmt_probe,
	.remove = __exit_p(octeon_mgmt_remove),
};

extern void octeon_mdiobus_force_mod_depencency(void);

static int __init octeon_mgmt_mod_init(void)
{
	/* Force our mdiobus driver module to be loaded first. */
	octeon_mdiobus_force_mod_depencency();
	return platform_driver_register(&octeon_mgmt_driver);
}

static void __exit octeon_mgmt_mod_exit(void)
{
	platform_driver_unregister(&octeon_mgmt_driver);
}

module_init(octeon_mgmt_mod_init);
module_exit(octeon_mgmt_mod_exit);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);