/*
 * Driver for (BCM4706)? GBit MAC core on BCMA bus.
 *
 * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bgmac.h"

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <bcm47xx_nvram.h>

static const struct bcma_device_id bgmac_bcma_tbl[] = {
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
	BCMA_CORETABLE_END
};
MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);

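/* Poll @reg until the bits selected by @mask read back as @value or until
 * roughly @timeout microseconds have passed.
 */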
static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask,
			     u32 value, int timeout)
{
	u32 val;
	int i;

	for (i = 0; i < timeout / 10; i++) {
		val = bcma_read32(core, reg);
		if ((val & mask) == value)
			return true;
		udelay(10);
	}
	pr_err("Timeout waiting for reg 0x%X\n", reg);
	return false;
}

/**************************************************
 * DMA
 **************************************************/

static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	u32 val;
	int i;

	if (!ring->mmio_base)
		return;

	/* Suspend DMA TX ring first.
	 * bgmac_wait_value doesn't support waiting for any of a few values,
	 * so implement the whole loop here.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
		    BGMAC_DMA_TX_SUSPEND);
	for (i = 0; i < 10000 / 10; i++) {
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		val &= BGMAC_DMA_TX_STAT;
		if (val == BGMAC_DMA_TX_STAT_DISABLED ||
		    val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
		    val == BGMAC_DMA_TX_STAT_STOPPED) {
			i = 0;
			break;
		}
		udelay(10);
	}
	if (i)
		bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
			  ring->mmio_base, val);

	/* Remove SUSPEND bit */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
	if (!bgmac_wait_value(bgmac->core,
			      ring->mmio_base + BGMAC_DMA_TX_STATUS,
			      BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
			      10000)) {
		bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
			   ring->mmio_base);
		udelay(300);
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
			bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n",
				  ring->mmio_base);
	}
}

static void bgmac_dma_tx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
	ctl |= BGMAC_DMA_TX_ENABLE;
	ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
}

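/* Map the skb, fill in a single TX descriptor and pass the new ring end to
 * the hardware. Stops the queue when only one free slot is left.
 */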
static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	struct bgmac_dma_desc *dma_desc;
	struct bgmac_slot_info *slot;
	u32 ctl0, ctl1;
	int free_slots;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
		goto err_stop_drop;
	}

	if (ring->start <= ring->end)
		free_slots = ring->start - ring->end + BGMAC_TX_RING_SLOTS;
	else
		free_slots = ring->start - ring->end;
	if (free_slots == 1) {
		bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot = &ring->slots[ring->end];
	slot->skb = skb;
	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb->len,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, slot->dma_addr)) {
		bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
			  ring->mmio_base);
		goto err_stop_drop;
	}

	ctl0 = BGMAC_DESC_CTL0_IOC | BGMAC_DESC_CTL0_SOF | BGMAC_DESC_CTL0_EOF;
	if (ring->end == ring->num_slots - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 = skb->len & BGMAC_DESC_CTL1_LEN;

	dma_desc = ring->cpu_base;
	dma_desc += ring->end;
	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);

	netdev_sent_queue(net_dev, skb->len);

	wmb();

	/* Increase ring->end to point to an empty slot. We tell the hardware
	 * the first slot it should *not* read.
	 */
	if (++ring->end >= BGMAC_TX_RING_SLOTS)
		ring->end = 0;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
		    ring->end * sizeof(struct bgmac_dma_desc));

	/* Always keep one slot free to allow detecting bugged calls. */
	if (--free_slots == 1)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

err_stop_drop:
	netif_stop_queue(net_dev);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* Free transmitted packets */
static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	int empty_slot;
	bool freed = false;
	unsigned bytes_compl = 0, pkts_compl = 0;

	/* The last slot that hardware didn't consume yet */
	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot -= ring->index_base;
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != empty_slot) {
		struct bgmac_slot_info *slot = &ring->slots[ring->start];

		if (slot->skb) {
			/* Unmap no longer used buffer */
			dma_unmap_single(dma_dev, slot->dma_addr,
					 slot->skb->len, DMA_TO_DEVICE);
			slot->dma_addr = 0;

			bytes_compl += slot->skb->len;
			pkts_compl++;

			/* Free memory! :) */
			dev_kfree_skb(slot->skb);
			slot->skb = NULL;
		} else {
			bgmac_err(bgmac, "Hardware reported transmission for empty TX ring slot %d! End of ring: %d\n",
				  ring->start, ring->end);
		}

		if (++ring->start >= BGMAC_TX_RING_SLOTS)
			ring->start = 0;
		freed = true;
	}

	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);

	if (freed && netif_queue_stopped(bgmac->net_dev))
		netif_wake_queue(bgmac->net_dev);
}

static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	if (!ring->mmio_base)
		return;

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
	if (!bgmac_wait_value(bgmac->core,
			      ring->mmio_base + BGMAC_DMA_RX_STATUS,
			      BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
			      10000))
		bgmac_err(bgmac, "Reset of ring 0x%X RX failed\n",
			  ring->mmio_base);
}

static void bgmac_dma_rx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
	ctl |= BGMAC_DMA_RX_ENABLE;
	ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
	ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
	ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
}

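/* Allocate and map a fresh RX skb for one ring slot and poison its header
 * so a reception that never happened can be detected later.
 */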
static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
				     struct bgmac_slot_info *slot)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct bgmac_rx_header *rx;

	/* Alloc skb */
	slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
	if (!slot->skb)
		return -ENOMEM;

	/* Poison - if everything goes fine, hardware will overwrite it */
	rx = (struct bgmac_rx_header *)slot->skb->data;
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);

	/* Map skb for the DMA */
	slot->dma_addr = dma_map_single(dma_dev, slot->skb->data,
					BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, slot->dma_addr)) {
		bgmac_err(bgmac, "DMA mapping error\n");
		return -ENOMEM;
	}
	if (slot->dma_addr & 0xC0000000)
		bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");

	return 0;
}

static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring, int desc_idx)
{
	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
	u32 ctl0 = 0, ctl1 = 0;

	if (desc_idx == ring->num_slots - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
	/* Is there any BGMAC device that requires extension? */
	/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
	 * B43_DMA64_DCTL1_ADDREXT_MASK;
	 */

	dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);
}

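/* RX processing for NAPI: walk the ring up to the hardware's current
 * descriptor, copy each received frame into a freshly allocated skb, hand
 * it to the stack and recycle the original DMA buffer in place.
 */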
static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	ring->end = end_slot;

	while (ring->start != ring->end) {
		struct device *dma_dev = bgmac->core->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct sk_buff *skb = slot->skb;
		struct sk_buff *new_skb;
		struct bgmac_rx_header *rx;
		u16 len, flags;

		/* Unmap buffer to make it accessible to the CPU */
		dma_sync_single_for_cpu(dma_dev, slot->dma_addr,
					BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

		/* Get info from the header */
		rx = (struct bgmac_rx_header *)skb->data;
		len = le16_to_cpu(rx->len);
		flags = le16_to_cpu(rx->flags);

		/* Check for poison and drop or pass the packet */
		if (len == 0xdead && flags == 0xbeef) {
			bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
				  ring->start);
		} else {
			/* Omit CRC. */
			len -= ETH_FCS_LEN;

			new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
			if (new_skb) {
				skb_put(new_skb, len);
				skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
								 new_skb->data,
								 len);
				skb_checksum_none_assert(skb);
				new_skb->protocol =
					eth_type_trans(new_skb, bgmac->net_dev);
				netif_receive_skb(new_skb);
				handled++;
			} else {
				bgmac->net_dev->stats.rx_dropped++;
				bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n");
			}

			/* Poison the old skb */
			rx->len = cpu_to_le16(0xdead);
			rx->flags = cpu_to_le16(0xbeef);
		}

		/* Make it back accessible to the hardware */
		dma_sync_single_for_device(dma_dev, slot->dma_addr,
					   BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	return handled;
}

/* Does ring support unaligned addressing? */
static bool bgmac_dma_unaligned(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring,
				enum bgmac_dma_ring_type ring_type)
{
	switch (ring_type) {
	case BGMAC_DMA_RING_TX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
			return true;
		break;
	case BGMAC_DMA_RING_RX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
			return true;
		break;
	}
	return false;
}

static void bgmac_dma_ring_free(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct bgmac_slot_info *slot;
	int size;
	int i;

	for (i = 0; i < ring->num_slots; i++) {
		slot = &ring->slots[i];
		if (slot->skb) {
			if (slot->dma_addr)
				dma_unmap_single(dma_dev, slot->dma_addr,
						 slot->skb->len, DMA_TO_DEVICE);
			dev_kfree_skb(slot->skb);
		}
	}

	if (ring->cpu_base) {
		/* Free ring of descriptors */
		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
		dma_free_coherent(dma_dev, size, ring->cpu_base,
				  ring->dma_base);
	}
}

static void bgmac_dma_free(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_ring_free(bgmac, &bgmac->tx_ring[i]);
	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_ring_free(bgmac, &bgmac->rx_ring[i]);
}

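/* Allocate coherent descriptor rings for all TX/RX channels and pre-fill
 * every RX slot with a mapped skb. TX slots are populated on demand when
 * packets are transmitted.
 */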
static int bgmac_dma_alloc(struct bgmac *bgmac)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct bgmac_dma_ring *ring;
	static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
					 BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
	int size; /* ring size: different for Tx and Rx */
	int err;
	int i;

	BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
	BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));

	if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) {
		bgmac_err(bgmac, "Core does not report 64-bit DMA\n");
		return -ENOTSUPP;
	}

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];
		ring->num_slots = BGMAC_TX_RING_SLOTS;
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n",
				  ring->mmio_base);
			goto err_dma_free;
		}
		if (ring->dma_base & 0xC0000000)
			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_TX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* No need to alloc TX slots yet */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];
		ring->num_slots = BGMAC_RX_RING_SLOTS;
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n",
				  ring->mmio_base);
			err = -ENOMEM;
			goto err_dma_free;
		}
		if (ring->dma_base & 0xC0000000)
			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_RX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* Alloc RX slots */
		for (j = 0; j < ring->num_slots; j++) {
			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
			if (err) {
				bgmac_err(bgmac, "Can't allocate skb for slot in RX ring\n");
				goto err_dma_free;
			}
		}
	}

	return 0;

err_dma_free:
	bgmac_dma_free(bgmac);
	return -ENOMEM;
}

static void bgmac_dma_init(struct bgmac *bgmac)
{
	struct bgmac_dma_ring *ring;
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;	/* Points at the slot that should *not* be read */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);

		for (j = 0; j < ring->num_slots; j++)
			bgmac_dma_rx_setup_desc(bgmac, ring, j);

		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
			    ring->index_base +
			    ring->num_slots * sizeof(struct bgmac_dma_desc));

		ring->start = 0;
		ring->end = 0;
	}
}

/**************************************************
 * PHY ops
 **************************************************/

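/* Indirect PHY register access. On BCM4706 the PHY access/control registers
 * live in the shared GMAC common core; on other chips they are part of the
 * GMAC core itself.
 */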
static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
{
	struct bcma_device *core;
	u16 phy_access_addr;
	u16 phy_ctl_addr;
	u32 tmp;

	BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK);
	BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK);
	BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT);
	BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK);
	BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT);
	BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE);
	BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START);
	BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK);
	BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK);
	BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
	BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
		core = bgmac->core->bus->drv_gmac_cmn.core;
		phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
		phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
	} else {
		core = bgmac->core;
		phy_access_addr = BGMAC_PHY_ACCESS;
		phy_ctl_addr = BGMAC_PHY_CNTL;
	}

	tmp = bcma_read32(core, phy_ctl_addr);
	tmp &= ~BGMAC_PC_EPA_MASK;
	tmp |= phyaddr;
	bcma_write32(core, phy_ctl_addr, tmp);

	tmp = BGMAC_PA_START;
	tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
	tmp |= reg << BGMAC_PA_REG_SHIFT;
	bcma_write32(core, phy_access_addr, tmp);

	if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
		bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n",
			  phyaddr, reg);
		return 0xffff;
	}

	return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK;
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
{
	struct bcma_device *core;
	u16 phy_access_addr;
	u16 phy_ctl_addr;
	u32 tmp;

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
		core = bgmac->core->bus->drv_gmac_cmn.core;
		phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
		phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
	} else {
		core = bgmac->core;
		phy_access_addr = BGMAC_PHY_ACCESS;
		phy_ctl_addr = BGMAC_PHY_CNTL;
	}

	tmp = bcma_read32(core, phy_ctl_addr);
	tmp &= ~BGMAC_PC_EPA_MASK;
	tmp |= phyaddr;
	bcma_write32(core, phy_ctl_addr, tmp);

	bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
		bgmac_warn(bgmac, "Error setting MDIO int\n");

	tmp = BGMAC_PA_START;
	tmp |= BGMAC_PA_WRITE;
	tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
	tmp |= reg << BGMAC_PA_REG_SHIFT;
	tmp |= value;
	bcma_write32(core, phy_access_addr, tmp);

	if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
		bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n",
			  phyaddr, reg);
		return -ETIMEDOUT;
	}

	return 0;
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyforce */
static void bgmac_phy_force(struct bgmac *bgmac)
{
	u16 ctl;
	u16 mask = ~(BGMAC_PHY_CTL_SPEED | BGMAC_PHY_CTL_SPEED_MSB |
		     BGMAC_PHY_CTL_ANENAB | BGMAC_PHY_CTL_DUPLEX);

	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
		return;

	if (bgmac->autoneg)
		return;

	ctl = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL);
	ctl &= mask;
	if (bgmac->full_duplex)
		ctl |= BGMAC_PHY_CTL_DUPLEX;
	if (bgmac->speed == BGMAC_SPEED_100)
		ctl |= BGMAC_PHY_CTL_SPEED_100;
	else if (bgmac->speed == BGMAC_SPEED_1000)
		ctl |= BGMAC_PHY_CTL_SPEED_1000;
	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL, ctl);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyadvertise */
static void bgmac_phy_advertise(struct bgmac *bgmac)
{
	u16 adv;

	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
		return;

	if (!bgmac->autoneg)
		return;

	/* Adv selected 10/100 speeds */
	adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV);
	adv &= ~(BGMAC_PHY_ADV_10HALF | BGMAC_PHY_ADV_10FULL |
		 BGMAC_PHY_ADV_100HALF | BGMAC_PHY_ADV_100FULL);
	if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
		adv |= BGMAC_PHY_ADV_10HALF;
	if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
		adv |= BGMAC_PHY_ADV_100HALF;
	if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
		adv |= BGMAC_PHY_ADV_10FULL;
	if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
		adv |= BGMAC_PHY_ADV_100FULL;
	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV, adv);

	/* Adv selected 1000 speeds */
	adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2);
	adv &= ~(BGMAC_PHY_ADV2_1000HALF | BGMAC_PHY_ADV2_1000FULL);
	if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
		adv |= BGMAC_PHY_ADV2_1000HALF;
	if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
		adv |= BGMAC_PHY_ADV2_1000FULL;
	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2, adv);

	/* Restart */
	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
			bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) |
			BGMAC_PHY_CTL_RESTART);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
static void bgmac_phy_init(struct bgmac *bgmac)
{
	struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
	struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
	u8 i;

	if (ci->id == BCMA_CHIP_ID_BCM5356) {
		for (i = 0; i < 5; i++) {
			bgmac_phy_write(bgmac, i, 0x1f, 0x008b);
			bgmac_phy_write(bgmac, i, 0x15, 0x0100);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x12, 0x2aaa);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
		}
	}
	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
		bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
		bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
		for (i = 0; i < 5; i++) {
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x16, 0x5284);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
			bgmac_phy_write(bgmac, i, 0x17, 0x0010);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x16, 0x5296);
			bgmac_phy_write(bgmac, i, 0x17, 0x1073);
			bgmac_phy_write(bgmac, i, 0x17, 0x9073);
			bgmac_phy_write(bgmac, i, 0x16, 0x52b6);
			bgmac_phy_write(bgmac, i, 0x17, 0x9273);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
		}
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
static void bgmac_phy_reset(struct bgmac *bgmac)
{
	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
		return;

	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
			BGMAC_PHY_CTL_RESET);
	udelay(100);
	if (bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) &
	    BGMAC_PHY_CTL_RESET)
		bgmac_err(bgmac, "PHY reset failed\n");
	bgmac_phy_init(bgmac);
}

/**************************************************
 * Chip ops
 **************************************************/

/* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
 * there is nothing to change? Try it after stabilizing the driver.
 */
static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
				 bool force)
{
	u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	u32 new_val = (cmdcfg & mask) | set;

	bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR);
	udelay(2);

	if (new_val != cmdcfg || force)
		bgmac_write(bgmac, BGMAC_CMDCFG, new_val);

	bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR);
	udelay(2);
}

static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
{
	u32 tmp;

	tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
	tmp = (addr[4] << 8) | addr[5];
	bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
}

static void bgmac_set_rx_mode(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	if (net_dev->flags & IFF_PROMISC)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
}

#if 0 /* We don't use these regs yet */
static void bgmac_chip_stats_update(struct bgmac *bgmac)
{
	int i;

	if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) {
		for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
			bgmac->mib_tx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_TX_GOOD_OCTETS + (i * 4));
		for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
			bgmac->mib_rx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_RX_GOOD_OCTETS + (i * 4));
	}

	/* TODO: what else? how to handle BCM4706? Specs are needed */
}
#endif

static void bgmac_clear_mib(struct bgmac *bgmac)
{
	int i;

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT)
		return;

	bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
	for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
		bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
	for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
		bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
static void bgmac_speed(struct bgmac *bgmac, int speed)
{
	u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
	u32 set = 0;

	if (speed & BGMAC_SPEED_10)
		set |= BGMAC_CMDCFG_ES_10;
	if (speed & BGMAC_SPEED_100)
		set |= BGMAC_CMDCFG_ES_100;
	if (speed & BGMAC_SPEED_1000)
		set |= BGMAC_CMDCFG_ES_1000;
	if (!bgmac->full_duplex)
		set |= BGMAC_CMDCFG_HD;
	bgmac_cmdcfg_maskset(bgmac, mask, set, true);
}

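/* Pick the MAC speed based on the interface mode reported in the device
 * status register: for modes 0 and 1 force 100 Mbit/s when autonegotiating,
 * otherwise program the configured speed.
 */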
static void bgmac_miiconfig(struct bgmac *bgmac)
{
	u8 imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
		   BGMAC_DS_MM_SHIFT;
	if (imode == 0 || imode == 1) {
		if (bgmac->autoneg)
			bgmac_speed(bgmac, BGMAC_SPEED_100);
		else
			bgmac_speed(bgmac, bgmac->speed);
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
static void bgmac_chip_reset(struct bgmac *bgmac)
{
	struct bcma_device *core = bgmac->core;
	struct bcma_bus *bus = core->bus;
	struct bcma_chipinfo *ci = &bus->chipinfo;
	u32 flags = 0;
	u32 iost;
	int i;

	if (bcma_core_is_enabled(core)) {
		if (!bgmac->stats_grabbed) {
			/* bgmac_chip_stats_update(bgmac); */
			bgmac->stats_grabbed = true;
		}

		for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
			bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);

		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
		udelay(1);

		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
			bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);

		/* TODO: Clear software multicast filter list */
	}

	iost = bcma_aread32(core, BCMA_IOST);
	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9))
		iost &= ~BGMAC_BCMA_IOST_ATTACHED;

	if (iost & BGMAC_BCMA_IOST_ATTACHED) {
		flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
		if (!bgmac->has_robosw)
			flags |= BGMAC_BCMA_IOCTL_SW_RESET;
	}

	bcma_core_enable(core, flags);

	if (core->id.rev > 2) {
		bgmac_set(bgmac, BCMA_CLKCTLST, 1 << 8);
		bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, 1 << 24, 1 << 24,
				 1000);
	}

	if (ci->id == BCMA_CHIP_ID_BCM5357 || ci->id == BCMA_CHIP_ID_BCM4749 ||
	    ci->id == BCMA_CHIP_ID_BCM53572) {
		struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
		u8 et_swtype = 0;
		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
					  buf);
			et_swtype &= 0x0f;
			et_swtype <<= 4;
			sw_type = et_swtype;
		} else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 9) {
			sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
		} else if ((ci->id != BCMA_CHIP_ID_BCM53572 && ci->pkg == 10) ||
			   (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9)) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
		}
		bcma_chipco_chipctl_maskset(cc, 1,
					    ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
					      BGMAC_CHIPCTL_1_SW_TYPE_MASK),
					    sw_type);
	}

	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
		bcma_awrite32(core, BCMA_IOCTL,
			      bcma_aread32(core, BCMA_IOCTL) &
			      ~BGMAC_BCMA_IOCTL_SW_RESET);

	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
	 * Specs don't mention using BGMAC_CMDCFG_SR, but in this routine
	 * BGMAC_CMDCFG is read _after_ putting the chip in a reset, so the
	 * bit has to be kept until taking the MAC out of the reset.
	 */
	bgmac_cmdcfg_maskset(bgmac,
			     ~(BGMAC_CMDCFG_TE |
			       BGMAC_CMDCFG_RE |
			       BGMAC_CMDCFG_RPI |
			       BGMAC_CMDCFG_TAI |
			       BGMAC_CMDCFG_HD |
			       BGMAC_CMDCFG_ML |
			       BGMAC_CMDCFG_CFE |
			       BGMAC_CMDCFG_RL |
			       BGMAC_CMDCFG_RED |
			       BGMAC_CMDCFG_PE |
			       BGMAC_CMDCFG_TPI |
			       BGMAC_CMDCFG_PAD_EN |
			       BGMAC_CMDCFG_PF),
			     BGMAC_CMDCFG_PROM |
			     BGMAC_CMDCFG_NLC |
			     BGMAC_CMDCFG_CFE |
			     BGMAC_CMDCFG_SR,
			     false);

	bgmac_clear_mib(bgmac);
	if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
		bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0,
			       BCMA_GMAC_CMN_PC_MTE);
	else
		bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
	bgmac_miiconfig(bgmac);
	bgmac_phy_init(bgmac);

	netdev_reset_queue(bgmac->net_dev);

	bgmac->int_status = 0;
}

static void bgmac_chip_intrs_on(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
}

static void bgmac_chip_intrs_off(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, 0);
	bgmac_read(bgmac, BGMAC_INT_MASK);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
static void bgmac_enable(struct bgmac *bgmac)
{
	struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
	u32 cmdcfg;
	u32 mode;
	u32 rxq_ctl;
	u32 fl_ctl;
	u16 bp_clk;
	u8 mdp;

	cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
			     BGMAC_CMDCFG_SR, true);
	udelay(2);
	cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
	bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);

	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
	       BGMAC_DS_MM_SHIFT;
	if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0)
		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
	if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2)
		bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0,
					    BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);

	switch (ci->id) {
	case BCMA_CHIP_ID_BCM5357:
	case BCMA_CHIP_ID_BCM4749:
	case BCMA_CHIP_ID_BCM53572:
	case BCMA_CHIP_ID_BCM4716:
	case BCMA_CHIP_ID_BCM47162:
		fl_ctl = 0x03cb04cb;
		if (ci->id == BCMA_CHIP_ID_BCM5357 ||
		    ci->id == BCMA_CHIP_ID_BCM4749 ||
		    ci->id == BCMA_CHIP_ID_BCM53572)
			fl_ctl = 0x2300e1;
		bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
		bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
		break;
	}

	rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
	rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
	bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / 1000000;
	mdp = (bp_clk * 128 / 1000) - 3;
	rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
	bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
{
	struct bgmac_dma_ring *ring;
	int i;

	/* 1 interrupt per received frame */
	bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);

	/* Enable 802.3x tx flow control (honor received PAUSE frames) */
	bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);

	bgmac_set_rx_mode(bgmac->net_dev);

	bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);

	if (bgmac->loopback)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);

	bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);

	if (!bgmac->autoneg) {
		bgmac_speed(bgmac, bgmac->speed);
		bgmac_phy_force(bgmac);
	} else if (bgmac->speed) { /* if there is anything to adv */
		bgmac_phy_advertise(bgmac);
	}

	if (full_init) {
		bgmac_dma_init(bgmac);
		if (1) /* FIXME: is there any case we don't want IRQs? */
			bgmac_chip_intrs_on(bgmac);
	} else {
		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
			ring = &bgmac->rx_ring[i];
			bgmac_dma_rx_enable(bgmac, ring);
		}
	}

	bgmac_enable(bgmac);
}

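/* Hard IRQ handler: ack the interrupt sources we care about, mask further
 * interrupts and defer the actual work to NAPI.
 */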
static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
{
	struct bgmac *bgmac = netdev_priv(dev_id);

	u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
	int_status &= bgmac->int_mask;

	if (!int_status)
		return IRQ_NONE;

	/* Ack */
	bgmac_write(bgmac, BGMAC_INT_STATUS, int_status);

	/* Disable new interrupts until handling existing ones */
	bgmac_chip_intrs_off(bgmac);

	bgmac->int_status = int_status;

	napi_schedule(&bgmac->napi);

	return IRQ_HANDLED;
}

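/* NAPI poll: reap completed TX descriptors, receive up to @weight frames,
 * then re-enable device interrupts.
 */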
static int bgmac_poll(struct napi_struct *napi, int weight)
{
	struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
	struct bgmac_dma_ring *ring;
	int handled = 0;

	if (bgmac->int_status & BGMAC_IS_TX0) {
		ring = &bgmac->tx_ring[0];
		bgmac_dma_tx_free(bgmac, ring);
		bgmac->int_status &= ~BGMAC_IS_TX0;
	}

	if (bgmac->int_status & BGMAC_IS_RX) {
		ring = &bgmac->rx_ring[0];
		handled += bgmac_dma_rx_read(bgmac, ring, weight);
		bgmac->int_status &= ~BGMAC_IS_RX;
	}

	if (bgmac->int_status) {
		bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", bgmac->int_status);
		bgmac->int_status = 0;
	}

	if (handled < weight)
		napi_complete(napi);

	bgmac_chip_intrs_on(bgmac);

	return handled;
}

/**************************************************
 * net_device_ops
 **************************************************/

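/* ndo_open: fully reset and reinitialize the chip, request the (shared) IRQ
 * and enable NAPI before reporting the carrier.
 */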
static int bgmac_open(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int err = 0;

	bgmac_chip_reset(bgmac);
	/* Specs mention reclaiming rings here, but we do that in DMA init */
	bgmac_chip_init(bgmac, true);

	err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED,
			  KBUILD_MODNAME, net_dev);
	if (err < 0) {
		bgmac_err(bgmac, "IRQ request error: %d!\n", err);
		goto err_out;
	}
	napi_enable(&bgmac->napi);

	netif_carrier_on(net_dev);

err_out:
	return err;
}

static int bgmac_stop(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	netif_carrier_off(net_dev);

	napi_disable(&bgmac->napi);
	bgmac_chip_intrs_off(bgmac);
	free_irq(bgmac->core->irq, net_dev);

	bgmac_chip_reset(bgmac);

	return 0;
}

static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
				    struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct bgmac_dma_ring *ring;

	/* No QOS support yet */
	ring = &bgmac->tx_ring[0];
	return bgmac_dma_tx_add(bgmac, ring, skb);
}

static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int ret;

	ret = eth_prepare_mac_addr_change(net_dev, addr);
	if (ret < 0)
		return ret;
	bgmac_write_mac_address(bgmac, (u8 *)addr);
	eth_commit_mac_addr_change(net_dev, addr);
	return 0;
}

static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bgmac->phyaddr;
		/* fallthru */
	case SIOCGMIIREG:
		if (!netif_running(net_dev))
			return -EAGAIN;
		data->val_out = bgmac_phy_read(bgmac, data->phy_id,
					       data->reg_num & 0x1f);
		return 0;
	case SIOCSMIIREG:
		if (!netif_running(net_dev))
			return -EAGAIN;
		bgmac_phy_write(bgmac, data->phy_id, data->reg_num & 0x1f,
				data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops bgmac_netdev_ops = {
	.ndo_open		= bgmac_open,
	.ndo_stop		= bgmac_stop,
	.ndo_start_xmit		= bgmac_start_xmit,
	.ndo_set_rx_mode	= bgmac_set_rx_mode,
	.ndo_set_mac_address	= bgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bgmac_ioctl,
};

/**************************************************
 * ethtool_ops
 **************************************************/

static int bgmac_get_settings(struct net_device *net_dev,
			      struct ethtool_cmd *cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	cmd->supported = SUPPORTED_10baseT_Half |
			 SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half |
			 SUPPORTED_100baseT_Full |
			 SUPPORTED_1000baseT_Half |
			 SUPPORTED_1000baseT_Full |
			 SUPPORTED_Autoneg;

	if (bgmac->autoneg) {
		WARN_ON(cmd->advertising);
		if (bgmac->full_duplex) {
			if (bgmac->speed & BGMAC_SPEED_10)
				cmd->advertising |= ADVERTISED_10baseT_Full;
			if (bgmac->speed & BGMAC_SPEED_100)
				cmd->advertising |= ADVERTISED_100baseT_Full;
			if (bgmac->speed & BGMAC_SPEED_1000)
				cmd->advertising |= ADVERTISED_1000baseT_Full;
		} else {
			if (bgmac->speed & BGMAC_SPEED_10)
				cmd->advertising |= ADVERTISED_10baseT_Half;
			if (bgmac->speed & BGMAC_SPEED_100)
				cmd->advertising |= ADVERTISED_100baseT_Half;
			if (bgmac->speed & BGMAC_SPEED_1000)
				cmd->advertising |= ADVERTISED_1000baseT_Half;
		}
	} else {
		switch (bgmac->speed) {
		case BGMAC_SPEED_10:
			ethtool_cmd_speed_set(cmd, SPEED_10);
			break;
		case BGMAC_SPEED_100:
			ethtool_cmd_speed_set(cmd, SPEED_100);
			break;
		case BGMAC_SPEED_1000:
			ethtool_cmd_speed_set(cmd, SPEED_1000);
			break;
		}
	}

	cmd->duplex = bgmac->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;

	cmd->autoneg = bgmac->autoneg;

	return 0;
}

#if 0
static int bgmac_set_settings(struct net_device *net_dev,
			      struct ethtool_cmd *cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	return -1;
}
#endif

static void bgmac_get_drvinfo(struct net_device *net_dev,
			      struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info));
}

static const struct ethtool_ops bgmac_ethtool_ops = {
	.get_settings		= bgmac_get_settings,
	.get_drvinfo		= bgmac_get_drvinfo,
};

/**************************************************
 * MII
 **************************************************/

static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return bgmac_phy_read(bus->priv, mii_id, regnum);
}

static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	return bgmac_phy_write(bus->priv, mii_id, regnum, value);
}

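/* Register an MDIO bus so the kernel PHY layer can reach the PHY through
 * bgmac_phy_read()/bgmac_phy_write(). Only the PHY at bgmac->phyaddr is
 * exposed and all addresses are polled (no PHY interrupts).
 */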
static int bgmac_mii_register(struct bgmac *bgmac)
{
	struct mii_bus *mii_bus;
	int i, err = 0;

	mii_bus = mdiobus_alloc();
	if (!mii_bus)
		return -ENOMEM;

	mii_bus->name = "bgmac mii bus";
	sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num,
		bgmac->core->core_unit);
	mii_bus->priv = bgmac;
	mii_bus->read = bgmac_mii_read;
	mii_bus->write = bgmac_mii_write;
	mii_bus->parent = &bgmac->core->dev;
	mii_bus->phy_mask = ~(1 << bgmac->phyaddr);

	mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
	if (!mii_bus->irq) {
		err = -ENOMEM;
		goto err_free_bus;
	}
	for (i = 0; i < PHY_MAX_ADDR; i++)
		mii_bus->irq[i] = PHY_POLL;

	err = mdiobus_register(mii_bus);
	if (err) {
		bgmac_err(bgmac, "Registration of mii bus failed\n");
		goto err_free_irq;
	}

	bgmac->mii_bus = mii_bus;

	return err;

err_free_irq:
	kfree(mii_bus->irq);
err_free_bus:
	mdiobus_free(mii_bus);
	return err;
}

static void bgmac_mii_unregister(struct bgmac *bgmac)
{
	struct mii_bus *mii_bus = bgmac->mii_bus;

	mdiobus_unregister(mii_bus);
	kfree(mii_bus->irq);
	mdiobus_free(mii_bus);
}

/**************************************************
 * BCMA bus ops
 **************************************************/

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
static int bgmac_probe(struct bcma_device *core)
{
	struct net_device *net_dev;
	struct bgmac *bgmac;
	struct ssb_sprom *sprom = &core->bus->sprom;
	u8 *mac = core->core_unit ? sprom->et1mac : sprom->et0mac;
	int err;

	/* We don't support 2nd, 3rd, ... units, SPROM has to be adjusted */
	if (core->core_unit > 1) {
		pr_err("Unsupported core_unit %d\n", core->core_unit);
		return -ENOTSUPP;
	}

	if (!is_valid_ether_addr(mac)) {
		dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac);
		eth_random_addr(mac);
		dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
	}

	/* Allocation and references */
	net_dev = alloc_etherdev(sizeof(*bgmac));
	if (!net_dev)
		return -ENOMEM;
	net_dev->netdev_ops = &bgmac_netdev_ops;
	net_dev->irq = core->irq;
	SET_ETHTOOL_OPS(net_dev, &bgmac_ethtool_ops);
	bgmac = netdev_priv(net_dev);
	bgmac->net_dev = net_dev;
	bgmac->core = core;
	bcma_set_drvdata(core, bgmac);

	/* Defaults */
	bgmac->autoneg = true;
	bgmac->full_duplex = true;
	bgmac->speed = BGMAC_SPEED_10 | BGMAC_SPEED_100 | BGMAC_SPEED_1000;
	memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);

	/* On BCM4706 we need common core to access PHY */
	if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
	    !core->bus->drv_gmac_cmn.core) {
		bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n");
		err = -ENODEV;
		goto err_netdev_free;
	}
	bgmac->cmn = core->bus->drv_gmac_cmn.core;

	bgmac->phyaddr = core->core_unit ? sprom->et1phyaddr :
			 sprom->et0phyaddr;
	bgmac->phyaddr &= BGMAC_PHY_MASK;
	if (bgmac->phyaddr == BGMAC_PHY_MASK) {
		bgmac_err(bgmac, "No PHY found\n");
		err = -ENODEV;
		goto err_netdev_free;
	}
	bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr,
		   bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");

	if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
		bgmac_err(bgmac, "PCI setup not implemented\n");
		err = -ENOTSUPP;
		goto err_netdev_free;
	}

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_alloc(bgmac);
	if (err) {
		bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
		goto err_netdev_free;
	}

	bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
	if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
		bgmac->int_mask &= ~BGMAC_IS_TX_MASK;

	/* TODO: reset the external phy. Specs are needed */
	bgmac_phy_reset(bgmac);

	bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
			       BGMAC_BFL_ENETROBO);
	if (bgmac->has_robosw)
		bgmac_warn(bgmac, "Support for Roboswitch not implemented\n");

	if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
		bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");

	err = bgmac_mii_register(bgmac);
	if (err) {
		bgmac_err(bgmac, "Cannot register MDIO\n");
		err = -ENOTSUPP;
		goto err_dma_free;
	}

	err = register_netdev(bgmac->net_dev);
	if (err) {
		bgmac_err(bgmac, "Cannot register net device\n");
		err = -ENOTSUPP;
		goto err_mii_unregister;
	}

	netif_carrier_off(net_dev);

	netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);

	return 0;

err_mii_unregister:
	bgmac_mii_unregister(bgmac);
err_dma_free:
	bgmac_dma_free(bgmac);

err_netdev_free:
	bcma_set_drvdata(core, NULL);
	free_netdev(net_dev);

	return err;
}

static void bgmac_remove(struct bcma_device *core)
{
	struct bgmac *bgmac = bcma_get_drvdata(core);

	netif_napi_del(&bgmac->napi);
	unregister_netdev(bgmac->net_dev);
	bgmac_mii_unregister(bgmac);
	bgmac_dma_free(bgmac);
	bcma_set_drvdata(core, NULL);
	free_netdev(bgmac->net_dev);
}

static struct bcma_driver bgmac_bcma_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= bgmac_bcma_tbl,
	.probe		= bgmac_probe,
	.remove		= bgmac_remove,
};

static int __init bgmac_init(void)
{
	int err;

	err = bcma_driver_register(&bgmac_bcma_driver);
	if (err)
		return err;
	pr_info("Broadcom 47xx GBit MAC driver loaded\n");

	return 0;
}

static void __exit bgmac_exit(void)
{
	bcma_driver_unregister(&bgmac_bcma_driver);
}

module_init(bgmac_init)
module_exit(bgmac_exit)

MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");