/*
 * Driver for (BCM4706)? GBit MAC core on BCMA bus.
 *
 * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bgmac.h"

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <bcm47xx_nvram.h>

static const struct bcma_device_id bgmac_bcma_tbl[] = {
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
	BCMA_CORETABLE_END
};
MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);

static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask,
			     u32 value, int timeout)
{
	u32 val;
	int i;

	for (i = 0; i < timeout / 10; i++) {
		val = bcma_read32(core, reg);
		if ((val & mask) == value)
			return true;
		udelay(10);
	}
	pr_err("Timeout waiting for reg 0x%X\n", reg);
	return false;
}

/**************************************************
 * DMA
 **************************************************/

static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	u32 val;
	int i;

	if (!ring->mmio_base)
		return;

	/* Suspend DMA TX ring first.
	 * bgmac_wait_value doesn't support waiting for any of a few values,
	 * so implement the whole loop here.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
		    BGMAC_DMA_TX_SUSPEND);
	for (i = 0; i < 10000 / 10; i++) {
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		val &= BGMAC_DMA_TX_STAT;
		if (val == BGMAC_DMA_TX_STAT_DISABLED ||
		    val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
		    val == BGMAC_DMA_TX_STAT_STOPPED) {
			i = 0;
			break;
		}
		udelay(10);
	}
	if (i)
		bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
			  ring->mmio_base, val);

	/* Remove SUSPEND bit */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
	if (!bgmac_wait_value(bgmac->core,
			      ring->mmio_base + BGMAC_DMA_TX_STATUS,
			      BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
			      10000)) {
		bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
			   ring->mmio_base);
		udelay(300);
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
			bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n",
				  ring->mmio_base);
	}
}

static void bgmac_dma_tx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
	ctl |= BGMAC_DMA_TX_ENABLE;
	ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
}

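/* Queue a single skb on a TX ring and tell the hardware about the new
 * descriptor by writing the updated end index.
 */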
static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	struct bgmac_dma_desc *dma_desc;
	struct bgmac_slot_info *slot;
	u32 ctl0, ctl1;
	int free_slots;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
		goto err_stop_drop;
	}

	if (ring->start <= ring->end)
		free_slots = ring->start - ring->end + BGMAC_TX_RING_SLOTS;
	else
		free_slots = ring->start - ring->end;
	if (free_slots == 1) {
		bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot = &ring->slots[ring->end];
	slot->skb = skb;
	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb->len,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, slot->dma_addr)) {
		bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
			  ring->mmio_base);
		goto err_stop_drop;
	}

	ctl0 = BGMAC_DESC_CTL0_IOC | BGMAC_DESC_CTL0_SOF | BGMAC_DESC_CTL0_EOF;
	if (ring->end == ring->num_slots - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 = skb->len & BGMAC_DESC_CTL1_LEN;

	dma_desc = ring->cpu_base;
	dma_desc += ring->end;
	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);

	netdev_sent_queue(net_dev, skb->len);

	wmb();

	/* Increase ring->end to point to an empty slot. We tell the hardware
	 * the first slot it should *not* read.
	 */
	if (++ring->end >= BGMAC_TX_RING_SLOTS)
		ring->end = 0;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
		    ring->end * sizeof(struct bgmac_dma_desc));

	/* Always keep one slot free to allow detecting bugged calls. */
	if (--free_slots == 1)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

err_stop_drop:
	netif_stop_queue(net_dev);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* Free transmitted packets */
static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	int empty_slot;
	bool freed = false;
	unsigned bytes_compl = 0, pkts_compl = 0;

	/* The last slot that hardware didn't consume yet */
	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot -= ring->index_base;
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != empty_slot) {
		struct bgmac_slot_info *slot = &ring->slots[ring->start];

		if (slot->skb) {
			/* Unmap no longer used buffer */
			dma_unmap_single(dma_dev, slot->dma_addr,
					 slot->skb->len, DMA_TO_DEVICE);
			slot->dma_addr = 0;

			bytes_compl += slot->skb->len;
			pkts_compl++;

			/* Free memory! :) */
			dev_kfree_skb(slot->skb);
			slot->skb = NULL;
		} else {
			bgmac_err(bgmac, "Hardware reported transmission for empty TX ring slot %d! End of ring: %d\n",
				  ring->start, ring->end);
		}

		if (++ring->start >= BGMAC_TX_RING_SLOTS)
			ring->start = 0;
		freed = true;
	}

	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);

	if (freed && netif_queue_stopped(bgmac->net_dev))
		netif_wake_queue(bgmac->net_dev);
}

static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	if (!ring->mmio_base)
		return;

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
	if (!bgmac_wait_value(bgmac->core,
			      ring->mmio_base + BGMAC_DMA_RX_STATUS,
			      BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
			      10000))
		bgmac_err(bgmac, "Reset of ring 0x%X RX failed\n",
			  ring->mmio_base);
}

static void bgmac_dma_rx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
	ctl |= BGMAC_DMA_RX_ENABLE;
	ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
	ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
	ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
}

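/* Allocate an skb for one RX slot, poison its header and map it for
 * device DMA.
 */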
static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
				     struct bgmac_slot_info *slot)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct bgmac_rx_header *rx;

	/* Alloc skb */
	slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
	if (!slot->skb)
		return -ENOMEM;

	/* Poison - if everything goes fine, hardware will overwrite it */
	rx = (struct bgmac_rx_header *)slot->skb->data;
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);

	/* Map skb for the DMA */
	slot->dma_addr = dma_map_single(dma_dev, slot->skb->data,
					BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, slot->dma_addr)) {
		bgmac_err(bgmac, "DMA mapping error\n");
		return -ENOMEM;
	}
	if (slot->dma_addr & 0xC0000000)
		bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");

	return 0;
}

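/* Process up to @weight received frames, copying each one into a freshly
 * allocated skb before passing it up the stack.
 */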
static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	ring->end = end_slot;

	while (ring->start != ring->end) {
		struct device *dma_dev = bgmac->core->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct sk_buff *skb = slot->skb;
		struct sk_buff *new_skb;
		struct bgmac_rx_header *rx;
		u16 len, flags;

		/* Unmap buffer to make it accessible to the CPU */
		dma_sync_single_for_cpu(dma_dev, slot->dma_addr,
					BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

		/* Get info from the header */
		rx = (struct bgmac_rx_header *)skb->data;
		len = le16_to_cpu(rx->len);
		flags = le16_to_cpu(rx->flags);

		/* Check for poison and drop or pass the packet */
		if (len == 0xdead && flags == 0xbeef) {
			bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
				  ring->start);
		} else {
			/* Omit CRC. */
			len -= ETH_FCS_LEN;

			new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
			if (new_skb) {
				skb_put(new_skb, len);
				skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
								 new_skb->data,
								 len);
				skb_checksum_none_assert(skb);
				new_skb->protocol =
					eth_type_trans(new_skb, bgmac->net_dev);
				netif_receive_skb(new_skb);
				handled++;
			} else {
				bgmac->net_dev->stats.rx_dropped++;
				bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n");
			}

			/* Poison the old skb */
			rx->len = cpu_to_le16(0xdead);
			rx->flags = cpu_to_le16(0xbeef);
		}

		/* Make it back accessible to the hardware */
		dma_sync_single_for_device(dma_dev, slot->dma_addr,
					   BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	return handled;
}

/* Does ring support unaligned addressing? */
static bool bgmac_dma_unaligned(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring,
				enum bgmac_dma_ring_type ring_type)
{
	switch (ring_type) {
	case BGMAC_DMA_RING_TX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
			return true;
		break;
	case BGMAC_DMA_RING_RX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
			return true;
		break;
	}
	return false;
}

static void bgmac_dma_ring_free(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct bgmac_slot_info *slot;
	int size;
	int i;

	for (i = 0; i < ring->num_slots; i++) {
		slot = &ring->slots[i];
		if (slot->skb) {
			if (slot->dma_addr)
				dma_unmap_single(dma_dev, slot->dma_addr,
						 slot->skb->len, DMA_TO_DEVICE);
			dev_kfree_skb(slot->skb);
		}
	}

	if (ring->cpu_base) {
		/* Free ring of descriptors */
		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
		dma_free_coherent(dma_dev, size, ring->cpu_base,
				  ring->dma_base);
	}
}

static void bgmac_dma_free(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_ring_free(bgmac, &bgmac->tx_ring[i]);
	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_ring_free(bgmac, &bgmac->rx_ring[i]);
}

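/* Allocate descriptor rings and RX buffers; TX buffers are mapped at
 * transmit time.
 */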
static int bgmac_dma_alloc(struct bgmac *bgmac)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct bgmac_dma_ring *ring;
	static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
					 BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
	int size; /* ring size: different for Tx and Rx */
	int err;
	int i;

	BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
	BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));

	if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) {
		bgmac_err(bgmac, "Core does not report 64-bit DMA\n");
		return -ENOTSUPP;
	}

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];
		ring->num_slots = BGMAC_TX_RING_SLOTS;
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n",
				  ring->mmio_base);
			goto err_dma_free;
		}
		if (ring->dma_base & 0xC0000000)
			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_TX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* No need to alloc TX slots yet */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];
		ring->num_slots = BGMAC_RX_RING_SLOTS;
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n",
				  ring->mmio_base);
			err = -ENOMEM;
			goto err_dma_free;
		}
		if (ring->dma_base & 0xC0000000)
			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_RX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* Alloc RX slots */
		for (j = 0; j < ring->num_slots; j++) {
			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
			if (err) {
				bgmac_err(bgmac, "Can't allocate skb for slot in RX ring\n");
				goto err_dma_free;
			}
		}
	}

	return 0;

err_dma_free:
	bgmac_dma_free(bgmac);
	return -ENOMEM;
}

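/* Program ring base addresses into the hardware and set up the initial RX
 * descriptors.
 */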
static void bgmac_dma_init(struct bgmac *bgmac)
{
	struct bgmac_dma_ring *ring;
	struct bgmac_dma_desc *dma_desc;
	u32 ctl0, ctl1;
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;	/* Points to the slot that should *not* be read */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);

		for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
		     j++, dma_desc++) {
			ctl0 = ctl1 = 0;

			if (j == ring->num_slots - 1)
				ctl0 |= BGMAC_DESC_CTL0_EOT;
			ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
			/* Is there any BGMAC device that requires extension? */
			/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
			 * B43_DMA64_DCTL1_ADDREXT_MASK;
			 */

			dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[j].dma_addr));
			dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[j].dma_addr));
			dma_desc->ctl0 = cpu_to_le32(ctl0);
			dma_desc->ctl1 = cpu_to_le32(ctl1);
		}

		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
			    ring->index_base +
			    ring->num_slots * sizeof(struct bgmac_dma_desc));

		ring->start = 0;
		ring->end = 0;
	}
}

/**************************************************
 * PHY ops
 **************************************************/

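/* Read a PHY register over MDIO; on BCM4706 the access goes through the
 * GMAC common core.
 */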
static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
{
	struct bcma_device *core;
	u16 phy_access_addr;
	u16 phy_ctl_addr;
	u32 tmp;

	BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK);
	BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK);
	BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT);
	BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK);
	BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT);
	BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE);
	BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START);
	BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK);
	BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK);
	BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
	BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
		core = bgmac->core->bus->drv_gmac_cmn.core;
		phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
		phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
	} else {
		core = bgmac->core;
		phy_access_addr = BGMAC_PHY_ACCESS;
		phy_ctl_addr = BGMAC_PHY_CNTL;
	}

	tmp = bcma_read32(core, phy_ctl_addr);
	tmp &= ~BGMAC_PC_EPA_MASK;
	tmp |= phyaddr;
	bcma_write32(core, phy_ctl_addr, tmp);

	tmp = BGMAC_PA_START;
	tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
	tmp |= reg << BGMAC_PA_REG_SHIFT;
	bcma_write32(core, phy_access_addr, tmp);

	if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
		bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n",
			  phyaddr, reg);
		return 0xffff;
	}

	return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK;
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
{
	struct bcma_device *core;
	u16 phy_access_addr;
	u16 phy_ctl_addr;
	u32 tmp;

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
		core = bgmac->core->bus->drv_gmac_cmn.core;
		phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
		phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
	} else {
		core = bgmac->core;
		phy_access_addr = BGMAC_PHY_ACCESS;
		phy_ctl_addr = BGMAC_PHY_CNTL;
	}

	tmp = bcma_read32(core, phy_ctl_addr);
	tmp &= ~BGMAC_PC_EPA_MASK;
	tmp |= phyaddr;
	bcma_write32(core, phy_ctl_addr, tmp);

	bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
		bgmac_warn(bgmac, "Error setting MDIO int\n");

	tmp = BGMAC_PA_START;
	tmp |= BGMAC_PA_WRITE;
	tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
	tmp |= reg << BGMAC_PA_REG_SHIFT;
	tmp |= value;
	bcma_write32(core, phy_access_addr, tmp);

	if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
		bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n",
			  phyaddr, reg);
		return -ETIMEDOUT;
	}

	return 0;
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyforce */
static void bgmac_phy_force(struct bgmac *bgmac)
{
	u16 ctl;
	u16 mask = ~(BGMAC_PHY_CTL_SPEED | BGMAC_PHY_CTL_SPEED_MSB |
		     BGMAC_PHY_CTL_ANENAB | BGMAC_PHY_CTL_DUPLEX);

	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
		return;

	if (bgmac->autoneg)
		return;

	ctl = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL);
	ctl &= mask;
	if (bgmac->full_duplex)
		ctl |= BGMAC_PHY_CTL_DUPLEX;
	if (bgmac->speed == BGMAC_SPEED_100)
		ctl |= BGMAC_PHY_CTL_SPEED_100;
	else if (bgmac->speed == BGMAC_SPEED_1000)
		ctl |= BGMAC_PHY_CTL_SPEED_1000;
	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL, ctl);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyadvertise */
static void bgmac_phy_advertise(struct bgmac *bgmac)
{
	u16 adv;

	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
		return;

	if (!bgmac->autoneg)
		return;

	/* Adv selected 10/100 speeds */
	adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV);
	adv &= ~(BGMAC_PHY_ADV_10HALF | BGMAC_PHY_ADV_10FULL |
		 BGMAC_PHY_ADV_100HALF | BGMAC_PHY_ADV_100FULL);
	if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
		adv |= BGMAC_PHY_ADV_10HALF;
	if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
		adv |= BGMAC_PHY_ADV_100HALF;
	if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
		adv |= BGMAC_PHY_ADV_10FULL;
	if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
		adv |= BGMAC_PHY_ADV_100FULL;
	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV, adv);

	/* Adv selected 1000 speeds */
	adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2);
	adv &= ~(BGMAC_PHY_ADV2_1000HALF | BGMAC_PHY_ADV2_1000FULL);
	if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
		adv |= BGMAC_PHY_ADV2_1000HALF;
	if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
		adv |= BGMAC_PHY_ADV2_1000FULL;
	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2, adv);

	/* Restart */
	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
			bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) |
			BGMAC_PHY_CTL_RESTART);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
static void bgmac_phy_init(struct bgmac *bgmac)
{
	struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
	struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
	u8 i;

	if (ci->id == BCMA_CHIP_ID_BCM5356) {
		for (i = 0; i < 5; i++) {
			bgmac_phy_write(bgmac, i, 0x1f, 0x008b);
			bgmac_phy_write(bgmac, i, 0x15, 0x0100);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x12, 0x2aaa);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
		}
	}
	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
		bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
		bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
		for (i = 0; i < 5; i++) {
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x16, 0x5284);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
			bgmac_phy_write(bgmac, i, 0x17, 0x0010);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x16, 0x5296);
			bgmac_phy_write(bgmac, i, 0x17, 0x1073);
			bgmac_phy_write(bgmac, i, 0x17, 0x9073);
			bgmac_phy_write(bgmac, i, 0x16, 0x52b6);
			bgmac_phy_write(bgmac, i, 0x17, 0x9273);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
		}
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
static void bgmac_phy_reset(struct bgmac *bgmac)
{
	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
		return;

	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
			BGMAC_PHY_CTL_RESET);
	udelay(100);
	if (bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) &
	    BGMAC_PHY_CTL_RESET)
		bgmac_err(bgmac, "PHY reset failed\n");
	bgmac_phy_init(bgmac);
}

/**************************************************
 * Chip ops
 **************************************************/

/* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
 * there is nothing to change? Try it after stabilizing the driver.
 */
static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
				 bool force)
{
	u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	u32 new_val = (cmdcfg & mask) | set;

	bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR);
	udelay(2);

	if (new_val != cmdcfg || force)
		bgmac_write(bgmac, BGMAC_CMDCFG, new_val);

	bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR);
	udelay(2);
}

static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
{
	u32 tmp;

	tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
	tmp = (addr[4] << 8) | addr[5];
	bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
}

static void bgmac_set_rx_mode(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	if (net_dev->flags & IFF_PROMISC)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
}

#if 0 /* We don't use these regs yet */
static void bgmac_chip_stats_update(struct bgmac *bgmac)
{
	int i;

	if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) {
		for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
			bgmac->mib_tx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_TX_GOOD_OCTETS + (i * 4));
		for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
			bgmac->mib_rx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_RX_GOOD_OCTETS + (i * 4));
	}

	/* TODO: what else? how to handle BCM4706? Specs are needed */
}
#endif

static void bgmac_clear_mib(struct bgmac *bgmac)
{
	int i;

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT)
		return;

	bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
	for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
		bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
	for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
		bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
static void bgmac_speed(struct bgmac *bgmac, int speed)
{
	u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
	u32 set = 0;

	if (speed & BGMAC_SPEED_10)
		set |= BGMAC_CMDCFG_ES_10;
	if (speed & BGMAC_SPEED_100)
		set |= BGMAC_CMDCFG_ES_100;
	if (speed & BGMAC_SPEED_1000)
		set |= BGMAC_CMDCFG_ES_1000;
	if (!bgmac->full_duplex)
		set |= BGMAC_CMDCFG_HD;
	bgmac_cmdcfg_maskset(bgmac, mask, set, true);
}

static void bgmac_miiconfig(struct bgmac *bgmac)
{
	u8 imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
		   BGMAC_DS_MM_SHIFT;
	if (imode == 0 || imode == 1) {
		if (bgmac->autoneg)
			bgmac_speed(bgmac, BGMAC_SPEED_100);
		else
			bgmac_speed(bgmac, bgmac->speed);
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
static void bgmac_chip_reset(struct bgmac *bgmac)
{
	struct bcma_device *core = bgmac->core;
	struct bcma_bus *bus = core->bus;
	struct bcma_chipinfo *ci = &bus->chipinfo;
	u32 flags = 0;
	u32 iost;
	int i;

	if (bcma_core_is_enabled(core)) {
		if (!bgmac->stats_grabbed) {
			/* bgmac_chip_stats_update(bgmac); */
			bgmac->stats_grabbed = true;
		}

		for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
			bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);

		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
		udelay(1);

		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
			bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);

		/* TODO: Clear software multicast filter list */
	}

	iost = bcma_aread32(core, BCMA_IOST);
	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9))
		iost &= ~BGMAC_BCMA_IOST_ATTACHED;

	if (iost & BGMAC_BCMA_IOST_ATTACHED) {
		flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
		if (!bgmac->has_robosw)
			flags |= BGMAC_BCMA_IOCTL_SW_RESET;
	}

	bcma_core_enable(core, flags);

	if (core->id.rev > 2) {
		bgmac_set(bgmac, BCMA_CLKCTLST, 1 << 8);
		bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, 1 << 24, 1 << 24,
				 1000);
	}

	if (ci->id == BCMA_CHIP_ID_BCM5357 || ci->id == BCMA_CHIP_ID_BCM4749 ||
	    ci->id == BCMA_CHIP_ID_BCM53572) {
		struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
		u8 et_swtype = 0;
		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
					  buf);
			et_swtype &= 0x0f;
			et_swtype <<= 4;
			sw_type = et_swtype;
		} else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 9) {
			sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
		} else if ((ci->id != BCMA_CHIP_ID_BCM53572 && ci->pkg == 10) ||
			   (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9)) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
		}
		bcma_chipco_chipctl_maskset(cc, 1,
					    ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
					      BGMAC_CHIPCTL_1_SW_TYPE_MASK),
					    sw_type);
	}

	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
		bcma_awrite32(core, BCMA_IOCTL,
			      bcma_aread32(core, BCMA_IOCTL) &
			      ~BGMAC_BCMA_IOCTL_SW_RESET);

	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
	 * Specs don't mention using BGMAC_CMDCFG_SR, but in this routine
	 * BGMAC_CMDCFG is read _after_ putting the chip in a reset. So it has
	 * to be kept until taking the MAC out of the reset.
	 */
	bgmac_cmdcfg_maskset(bgmac,
			     ~(BGMAC_CMDCFG_TE |
			       BGMAC_CMDCFG_RE |
			       BGMAC_CMDCFG_RPI |
			       BGMAC_CMDCFG_TAI |
			       BGMAC_CMDCFG_HD |
			       BGMAC_CMDCFG_ML |
			       BGMAC_CMDCFG_CFE |
			       BGMAC_CMDCFG_RL |
			       BGMAC_CMDCFG_RED |
			       BGMAC_CMDCFG_PE |
			       BGMAC_CMDCFG_TPI |
			       BGMAC_CMDCFG_PAD_EN |
			       BGMAC_CMDCFG_PF),
			     BGMAC_CMDCFG_PROM |
			     BGMAC_CMDCFG_NLC |
			     BGMAC_CMDCFG_CFE |
			     BGMAC_CMDCFG_SR,
			     false);

	bgmac_clear_mib(bgmac);
	if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
		bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0,
			       BCMA_GMAC_CMN_PC_MTE);
	else
		bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
	bgmac_miiconfig(bgmac);
	bgmac_phy_init(bgmac);

	netdev_reset_queue(bgmac->net_dev);

	bgmac->int_status = 0;
}

static void bgmac_chip_intrs_on(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
}

static void bgmac_chip_intrs_off(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, 0);
	bgmac_read(bgmac, BGMAC_INT_MASK);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
static void bgmac_enable(struct bgmac *bgmac)
{
	struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
	u32 cmdcfg;
	u32 mode;
	u32 rxq_ctl;
	u32 fl_ctl;
	u16 bp_clk;
	u8 mdp;

	cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
			     BGMAC_CMDCFG_SR, true);
	udelay(2);
	cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
	bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);

	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
	       BGMAC_DS_MM_SHIFT;
	if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0)
		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
	if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2)
		bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0,
					    BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);

	switch (ci->id) {
	case BCMA_CHIP_ID_BCM5357:
	case BCMA_CHIP_ID_BCM4749:
	case BCMA_CHIP_ID_BCM53572:
	case BCMA_CHIP_ID_BCM4716:
	case BCMA_CHIP_ID_BCM47162:
		fl_ctl = 0x03cb04cb;
		if (ci->id == BCMA_CHIP_ID_BCM5357 ||
		    ci->id == BCMA_CHIP_ID_BCM4749 ||
		    ci->id == BCMA_CHIP_ID_BCM53572)
			fl_ctl = 0x2300e1;
		bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
		bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
		break;
	}

	rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
	rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
	bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / 1000000;
	mdp = (bp_clk * 128 / 1000) - 3;
	rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
	bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
{
	struct bgmac_dma_ring *ring;
	int i;

	/* 1 interrupt per received frame */
	bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);

	/* Enable 802.3x tx flow control (honor received PAUSE frames) */
	bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);

	bgmac_set_rx_mode(bgmac->net_dev);

	bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);

	if (bgmac->loopback)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);

	bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);

	if (!bgmac->autoneg) {
		bgmac_speed(bgmac, bgmac->speed);
		bgmac_phy_force(bgmac);
	} else if (bgmac->speed) { /* if there is anything to adv */
		bgmac_phy_advertise(bgmac);
	}

	if (full_init) {
		bgmac_dma_init(bgmac);
		if (1) /* FIXME: is there any case we don't want IRQs? */
			bgmac_chip_intrs_on(bgmac);
	} else {
		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
			ring = &bgmac->rx_ring[i];
			bgmac_dma_rx_enable(bgmac, ring);
		}
	}

	bgmac_enable(bgmac);
}

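/* Hard IRQ handler: ack and mask interrupts, then defer the work to NAPI */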
static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
{
	struct bgmac *bgmac = netdev_priv(dev_id);

	u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
	int_status &= bgmac->int_mask;

	if (!int_status)
		return IRQ_NONE;

	/* Ack */
	bgmac_write(bgmac, BGMAC_INT_STATUS, int_status);

	/* Disable new interrupts until handling existing ones */
	bgmac_chip_intrs_off(bgmac);

	bgmac->int_status = int_status;

	napi_schedule(&bgmac->napi);

	return IRQ_HANDLED;
}

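/* NAPI poll: reclaim completed TX slots, receive pending frames and
 * re-enable interrupts.
 */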
static int bgmac_poll(struct napi_struct *napi, int weight)
{
	struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
	struct bgmac_dma_ring *ring;
	int handled = 0;

	if (bgmac->int_status & BGMAC_IS_TX0) {
		ring = &bgmac->tx_ring[0];
		bgmac_dma_tx_free(bgmac, ring);
		bgmac->int_status &= ~BGMAC_IS_TX0;
	}

	if (bgmac->int_status & BGMAC_IS_RX) {
		ring = &bgmac->rx_ring[0];
		handled += bgmac_dma_rx_read(bgmac, ring, weight);
		bgmac->int_status &= ~BGMAC_IS_RX;
	}

	if (bgmac->int_status) {
		bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", bgmac->int_status);
		bgmac->int_status = 0;
	}

	if (handled < weight)
		napi_complete(napi);

	bgmac_chip_intrs_on(bgmac);

	return handled;
}

/**************************************************
 * net_device_ops
 **************************************************/

static int bgmac_open(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int err = 0;

	bgmac_chip_reset(bgmac);
	/* Specs mention reclaiming rings here, but we do that in DMA init */
	bgmac_chip_init(bgmac, true);

	err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED,
			  KBUILD_MODNAME, net_dev);
	if (err < 0) {
		bgmac_err(bgmac, "IRQ request error: %d!\n", err);
		goto err_out;
	}
	napi_enable(&bgmac->napi);

	netif_carrier_on(net_dev);

err_out:
	return err;
}

static int bgmac_stop(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	netif_carrier_off(net_dev);

	napi_disable(&bgmac->napi);
	bgmac_chip_intrs_off(bgmac);
	free_irq(bgmac->core->irq, net_dev);

	bgmac_chip_reset(bgmac);

	return 0;
}

static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
				    struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct bgmac_dma_ring *ring;

	/* No QOS support yet */
	ring = &bgmac->tx_ring[0];
	return bgmac_dma_tx_add(bgmac, ring, skb);
}

static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int ret;

	ret = eth_prepare_mac_addr_change(net_dev, addr);
	if (ret < 0)
		return ret;
	bgmac_write_mac_address(bgmac, (u8 *)addr);
	eth_commit_mac_addr_change(net_dev, addr);
	return 0;
}

static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bgmac->phyaddr;
		/* fallthru */
	case SIOCGMIIREG:
		if (!netif_running(net_dev))
			return -EAGAIN;
		data->val_out = bgmac_phy_read(bgmac, data->phy_id,
					       data->reg_num & 0x1f);
		return 0;
	case SIOCSMIIREG:
		if (!netif_running(net_dev))
			return -EAGAIN;
		bgmac_phy_write(bgmac, data->phy_id, data->reg_num & 0x1f,
				data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops bgmac_netdev_ops = {
	.ndo_open		= bgmac_open,
	.ndo_stop		= bgmac_stop,
	.ndo_start_xmit		= bgmac_start_xmit,
	.ndo_set_rx_mode	= bgmac_set_rx_mode,
	.ndo_set_mac_address	= bgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bgmac_ioctl,
};

/**************************************************
 * ethtool_ops
 **************************************************/

static int bgmac_get_settings(struct net_device *net_dev,
			      struct ethtool_cmd *cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	cmd->supported = SUPPORTED_10baseT_Half |
			 SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half |
			 SUPPORTED_100baseT_Full |
			 SUPPORTED_1000baseT_Half |
			 SUPPORTED_1000baseT_Full |
			 SUPPORTED_Autoneg;

	if (bgmac->autoneg) {
		WARN_ON(cmd->advertising);
		if (bgmac->full_duplex) {
			if (bgmac->speed & BGMAC_SPEED_10)
				cmd->advertising |= ADVERTISED_10baseT_Full;
			if (bgmac->speed & BGMAC_SPEED_100)
				cmd->advertising |= ADVERTISED_100baseT_Full;
			if (bgmac->speed & BGMAC_SPEED_1000)
				cmd->advertising |= ADVERTISED_1000baseT_Full;
		} else {
			if (bgmac->speed & BGMAC_SPEED_10)
				cmd->advertising |= ADVERTISED_10baseT_Half;
			if (bgmac->speed & BGMAC_SPEED_100)
				cmd->advertising |= ADVERTISED_100baseT_Half;
			if (bgmac->speed & BGMAC_SPEED_1000)
				cmd->advertising |= ADVERTISED_1000baseT_Half;
		}
	} else {
		switch (bgmac->speed) {
		case BGMAC_SPEED_10:
			ethtool_cmd_speed_set(cmd, SPEED_10);
			break;
		case BGMAC_SPEED_100:
			ethtool_cmd_speed_set(cmd, SPEED_100);
			break;
		case BGMAC_SPEED_1000:
			ethtool_cmd_speed_set(cmd, SPEED_1000);
			break;
		}
	}

	cmd->duplex = bgmac->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;

	cmd->autoneg = bgmac->autoneg;

	return 0;
}

#if 0
static int bgmac_set_settings(struct net_device *net_dev,
			      struct ethtool_cmd *cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	return -1;
}
#endif

static void bgmac_get_drvinfo(struct net_device *net_dev,
			      struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info));
}

static const struct ethtool_ops bgmac_ethtool_ops = {
	.get_settings		= bgmac_get_settings,
	.get_drvinfo		= bgmac_get_drvinfo,
};

/**************************************************
 * MII
 **************************************************/

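/* MDIO bus accessors wrapping the bgmac PHY register helpers */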
static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return bgmac_phy_read(bus->priv, mii_id, regnum);
}

static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	return bgmac_phy_write(bus->priv, mii_id, regnum, value);
}

static int bgmac_mii_register(struct bgmac *bgmac)
{
	struct mii_bus *mii_bus;
	int i, err = 0;

	mii_bus = mdiobus_alloc();
	if (!mii_bus)
		return -ENOMEM;

	mii_bus->name = "bgmac mii bus";
	sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num,
		bgmac->core->core_unit);
	mii_bus->priv = bgmac;
	mii_bus->read = bgmac_mii_read;
	mii_bus->write = bgmac_mii_write;
	mii_bus->parent = &bgmac->core->dev;
	mii_bus->phy_mask = ~(1 << bgmac->phyaddr);

	mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
	if (!mii_bus->irq) {
		err = -ENOMEM;
		goto err_free_bus;
	}
	for (i = 0; i < PHY_MAX_ADDR; i++)
		mii_bus->irq[i] = PHY_POLL;

	err = mdiobus_register(mii_bus);
	if (err) {
		bgmac_err(bgmac, "Registration of mii bus failed\n");
		goto err_free_irq;
	}

	bgmac->mii_bus = mii_bus;

	return err;

err_free_irq:
	kfree(mii_bus->irq);
err_free_bus:
	mdiobus_free(mii_bus);
	return err;
}

static void bgmac_mii_unregister(struct bgmac *bgmac)
{
	struct mii_bus *mii_bus = bgmac->mii_bus;

	mdiobus_unregister(mii_bus);
	kfree(mii_bus->irq);
	mdiobus_free(mii_bus);
}

/**************************************************
 * BCMA bus ops
 **************************************************/

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
static int bgmac_probe(struct bcma_device *core)
{
	struct net_device *net_dev;
	struct bgmac *bgmac;
	struct ssb_sprom *sprom = &core->bus->sprom;
	u8 *mac = core->core_unit ? sprom->et1mac : sprom->et0mac;
	int err;

	/* We don't support 2nd, 3rd, ... units, SPROM has to be adjusted */
	if (core->core_unit > 1) {
		pr_err("Unsupported core_unit %d\n", core->core_unit);
		return -ENOTSUPP;
	}

	if (!is_valid_ether_addr(mac)) {
		dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac);
		eth_random_addr(mac);
		dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
	}

	/* Allocation and references */
	net_dev = alloc_etherdev(sizeof(*bgmac));
	if (!net_dev)
		return -ENOMEM;
	net_dev->netdev_ops = &bgmac_netdev_ops;
	net_dev->irq = core->irq;
	SET_ETHTOOL_OPS(net_dev, &bgmac_ethtool_ops);
	bgmac = netdev_priv(net_dev);
	bgmac->net_dev = net_dev;
	bgmac->core = core;
	bcma_set_drvdata(core, bgmac);

	/* Defaults */
	bgmac->autoneg = true;
	bgmac->full_duplex = true;
	bgmac->speed = BGMAC_SPEED_10 | BGMAC_SPEED_100 | BGMAC_SPEED_1000;
	memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);

	/* On BCM4706 we need common core to access PHY */
	if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
	    !core->bus->drv_gmac_cmn.core) {
		bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n");
		err = -ENODEV;
		goto err_netdev_free;
	}
	bgmac->cmn = core->bus->drv_gmac_cmn.core;

	bgmac->phyaddr = core->core_unit ? sprom->et1phyaddr :
			 sprom->et0phyaddr;
	bgmac->phyaddr &= BGMAC_PHY_MASK;
	if (bgmac->phyaddr == BGMAC_PHY_MASK) {
		bgmac_err(bgmac, "No PHY found\n");
		err = -ENODEV;
		goto err_netdev_free;
	}
	bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr,
		   bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");

	if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
		bgmac_err(bgmac, "PCI setup not implemented\n");
		err = -ENOTSUPP;
		goto err_netdev_free;
	}

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_alloc(bgmac);
	if (err) {
		bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
		goto err_netdev_free;
	}

	bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
	if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
		bgmac->int_mask &= ~BGMAC_IS_TX_MASK;

	/* TODO: reset the external phy. Specs are needed */
	bgmac_phy_reset(bgmac);

	bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
			       BGMAC_BFL_ENETROBO);
	if (bgmac->has_robosw)
		bgmac_warn(bgmac, "Support for Roboswitch not implemented\n");

	if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
		bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");

	err = bgmac_mii_register(bgmac);
	if (err) {
		bgmac_err(bgmac, "Cannot register MDIO\n");
		err = -ENOTSUPP;
		goto err_dma_free;
	}

	err = register_netdev(bgmac->net_dev);
	if (err) {
		bgmac_err(bgmac, "Cannot register net device\n");
		err = -ENOTSUPP;
		goto err_mii_unregister;
	}

	netif_carrier_off(net_dev);

	netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);

	return 0;

err_mii_unregister:
	bgmac_mii_unregister(bgmac);
err_dma_free:
	bgmac_dma_free(bgmac);

err_netdev_free:
	bcma_set_drvdata(core, NULL);
	free_netdev(net_dev);

	return err;
}

static void bgmac_remove(struct bcma_device *core)
{
	struct bgmac *bgmac = bcma_get_drvdata(core);

	netif_napi_del(&bgmac->napi);
	unregister_netdev(bgmac->net_dev);
	bgmac_mii_unregister(bgmac);
	bgmac_dma_free(bgmac);
	bcma_set_drvdata(core, NULL);
	free_netdev(bgmac->net_dev);
}

static struct bcma_driver bgmac_bcma_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= bgmac_bcma_tbl,
	.probe		= bgmac_probe,
	.remove		= bgmac_remove,
};

static int __init bgmac_init(void)
{
	int err;

	err = bcma_driver_register(&bgmac_bcma_driver);
	if (err)
		return err;
	pr_info("Broadcom 47xx GBit MAC driver loaded\n");

	return 0;
}

static void __exit bgmac_exit(void)
{
	bcma_driver_unregister(&bgmac_bcma_driver);
}

module_init(bgmac_init)
module_exit(bgmac_exit)

MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");