/*
 * Driver for (BCM4706)? GBit MAC core on BCMA bus.
 *
 * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bgmac.h"

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <bcm47xx_nvram.h>

static const struct bcma_device_id bgmac_bcma_tbl[] = {
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
	BCMA_CORETABLE_END
};
MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);

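/* Poll a 32-bit core register until the masked field equals @value; gives up
 * after roughly @timeout microseconds, checking in 10 us steps.
 */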
static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask,
			     u32 value, int timeout)
{
	u32 val;
	int i;

	for (i = 0; i < timeout / 10; i++) {
		val = bcma_read32(core, reg);
		if ((val & mask) == value)
			return true;
		udelay(10);
	}
	pr_err("Timeout waiting for reg 0x%X\n", reg);
	return false;
}

/**************************************************
 * DMA
 **************************************************/

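/* Quiesce a TX DMA ring: suspend it, wait for the engine to report an idle or
 * stopped state, then clear the control register and wait for DISABLED.
 */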
static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	u32 val;
	int i;

	if (!ring->mmio_base)
		return;

	/* Suspend DMA TX ring first.
	 * bgmac_wait_value doesn't support waiting for any of a few values,
	 * so implement the whole loop here.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
		    BGMAC_DMA_TX_SUSPEND);
	for (i = 0; i < 10000 / 10; i++) {
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		val &= BGMAC_DMA_TX_STAT;
		if (val == BGMAC_DMA_TX_STAT_DISABLED ||
		    val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
		    val == BGMAC_DMA_TX_STAT_STOPPED) {
			i = 0;
			break;
		}
		udelay(10);
	}
	if (i)
		bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
			  ring->mmio_base, val);

	/* Remove SUSPEND bit */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
	if (!bgmac_wait_value(bgmac->core,
			      ring->mmio_base + BGMAC_DMA_TX_STATUS,
			      BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
			      10000)) {
		bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
			   ring->mmio_base);
		udelay(300);
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
			bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n",
				  ring->mmio_base);
	}
}

static void bgmac_dma_tx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
	ctl |= BGMAC_DMA_TX_ENABLE;
	ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
}

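/* Queue one skb for transmission: map it for DMA, fill the next free
 * descriptor and advance the TX index so the hardware picks it up. The queue
 * is stopped when only one free slot remains.
 */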
static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	struct bgmac_dma_desc *dma_desc;
	struct bgmac_slot_info *slot;
	u32 ctl0, ctl1;
	int free_slots;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
		goto err_stop_drop;
	}

	if (ring->start <= ring->end)
		free_slots = ring->start - ring->end + BGMAC_TX_RING_SLOTS;
	else
		free_slots = ring->start - ring->end;
	if (free_slots == 1) {
		bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot = &ring->slots[ring->end];
	slot->skb = skb;
	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb->len,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, slot->dma_addr)) {
		bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
			  ring->mmio_base);
		goto err_stop_drop;
	}

	ctl0 = BGMAC_DESC_CTL0_IOC | BGMAC_DESC_CTL0_SOF | BGMAC_DESC_CTL0_EOF;
	if (ring->end == ring->num_slots - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 = skb->len & BGMAC_DESC_CTL1_LEN;

	dma_desc = ring->cpu_base;
	dma_desc += ring->end;
	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);

	netdev_sent_queue(net_dev, skb->len);

	wmb();

	/* Increase ring->end to point to the next empty slot. We tell the
	 * hardware the first slot it should *not* read.
	 */
	if (++ring->end >= BGMAC_TX_RING_SLOTS)
		ring->end = 0;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
		    ring->end * sizeof(struct bgmac_dma_desc));

	/* Always keep one slot free to allow detecting buggy calls. */
	if (--free_slots == 1)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

err_stop_drop:
	netif_stop_queue(net_dev);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* Free transmitted packets */
static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	int empty_slot;
	bool freed = false;
	unsigned bytes_compl = 0, pkts_compl = 0;

	/* The last slot that hardware didn't consume yet */
	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot -= ring->index_base;
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != empty_slot) {
		struct bgmac_slot_info *slot = &ring->slots[ring->start];

		if (slot->skb) {
			/* Unmap no longer used buffer */
			dma_unmap_single(dma_dev, slot->dma_addr,
					 slot->skb->len, DMA_TO_DEVICE);
			slot->dma_addr = 0;

			bytes_compl += slot->skb->len;
			pkts_compl++;

			/* Free memory! :) */
			dev_kfree_skb(slot->skb);
			slot->skb = NULL;
		} else {
			bgmac_err(bgmac, "Hardware reported transmission for empty TX ring slot %d! End of ring: %d\n",
				  ring->start, ring->end);
		}

		if (++ring->start >= BGMAC_TX_RING_SLOTS)
			ring->start = 0;
		freed = true;
	}

	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);

	if (freed && netif_queue_stopped(bgmac->net_dev))
		netif_wake_queue(bgmac->net_dev);
}

static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	if (!ring->mmio_base)
		return;

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
	if (!bgmac_wait_value(bgmac->core,
			      ring->mmio_base + BGMAC_DMA_RX_STATUS,
			      BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
			      10000))
		bgmac_err(bgmac, "Reset of ring 0x%X RX failed\n",
			  ring->mmio_base);
}

static void bgmac_dma_rx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
	ctl |= BGMAC_DMA_RX_ENABLE;
	ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
	ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
	ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
}

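/* Allocate and DMA-map a fresh receive skb for @slot; the RX header is
 * poisoned (0xdead/0xbeef) so a buffer the hardware never wrote can be
 * detected later.
 */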
static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
				     struct bgmac_slot_info *slot)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	struct bgmac_rx_header *rx;

	/* Alloc skb */
	skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
	if (!skb)
		return -ENOMEM;

	/* Poison - if everything goes fine, hardware will overwrite it */
	rx = (struct bgmac_rx_header *)skb->data;
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);

	/* Map skb for the DMA */
	dma_addr = dma_map_single(dma_dev, skb->data,
				  BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, dma_addr)) {
		bgmac_err(bgmac, "DMA mapping error\n");
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	/* Update the slot */
	slot->skb = skb;
	slot->dma_addr = dma_addr;

	if (slot->dma_addr & 0xC0000000)
		bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");

	return 0;
}

static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring, int desc_idx)
{
	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
	u32 ctl0 = 0, ctl1 = 0;

	if (desc_idx == ring->num_slots - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
	/* Is there any BGMAC device that requires extension? */
	/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
	 * B43_DMA64_DCTL1_ADDREXT_MASK;
	 */

	dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);
}

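/* NAPI receive path for one ring: process descriptors the hardware has
 * filled, hand the skbs to the network stack and refill each slot with a new
 * buffer. Returns the number of packets handled (at most @weight).
 */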
static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	ring->end = end_slot;

	while (ring->start != ring->end) {
		struct device *dma_dev = bgmac->core->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct sk_buff *skb = slot->skb;
		struct bgmac_rx_header *rx;
		u16 len, flags;

		/* Unmap buffer to make it accessible to the CPU */
		dma_sync_single_for_cpu(dma_dev, slot->dma_addr,
					BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

		/* Get info from the header */
		rx = (struct bgmac_rx_header *)skb->data;
		len = le16_to_cpu(rx->len);
		flags = le16_to_cpu(rx->flags);

		do {
			dma_addr_t old_dma_addr = slot->dma_addr;
			int err;

			/* Check for poison and drop or pass the packet */
			if (len == 0xdead && flags == 0xbeef) {
				bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
					  ring->start);
				dma_sync_single_for_device(dma_dev,
							   slot->dma_addr,
							   BGMAC_RX_BUF_SIZE,
							   DMA_FROM_DEVICE);
				break;
			}

			/* Omit CRC. */
			len -= ETH_FCS_LEN;

			/* Prepare new skb as replacement */
			err = bgmac_dma_rx_skb_for_slot(bgmac, slot);
			if (err) {
				/* Poison the old skb */
				rx->len = cpu_to_le16(0xdead);
				rx->flags = cpu_to_le16(0xbeef);

				dma_sync_single_for_device(dma_dev,
							   slot->dma_addr,
							   BGMAC_RX_BUF_SIZE,
							   DMA_FROM_DEVICE);
				break;
			}
			bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

			/* Unmap old skb, we'll pass it to the netif */
			dma_unmap_single(dma_dev, old_dma_addr,
					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

			skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET);

			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
			netif_receive_skb(skb);
			handled++;
		} while (0);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	return handled;
}

/* Does ring support unaligned addressing? */
static bool bgmac_dma_unaligned(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring,
				enum bgmac_dma_ring_type ring_type)
{
	switch (ring_type) {
	case BGMAC_DMA_RING_TX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
			return true;
		break;
	case BGMAC_DMA_RING_RX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
			return true;
		break;
	}
	return false;
}

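/* Free every skb still attached to a ring's slots (unmapping mapped ones
 * first) and release the coherent descriptor memory, if it was allocated.
 */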
static void bgmac_dma_ring_free(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct bgmac_slot_info *slot;
	int size;
	int i;

	for (i = 0; i < ring->num_slots; i++) {
		slot = &ring->slots[i];
		if (slot->skb) {
			if (slot->dma_addr)
				dma_unmap_single(dma_dev, slot->dma_addr,
						 slot->skb->len, DMA_TO_DEVICE);
			dev_kfree_skb(slot->skb);
		}
	}

	if (ring->cpu_base) {
		/* Free ring of descriptors */
		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
		dma_free_coherent(dma_dev, size, ring->cpu_base,
				  ring->dma_base);
	}
}

static void bgmac_dma_free(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_ring_free(bgmac, &bgmac->tx_ring[i]);
	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_ring_free(bgmac, &bgmac->rx_ring[i]);
}

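/* Allocate descriptor rings for all TX and RX queues, detect whether the core
 * needs ring indexes relative to the ring base address (unaligned mode), and
 * pre-fill every RX slot with a mapped skb.
 */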
static int bgmac_dma_alloc(struct bgmac *bgmac)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct bgmac_dma_ring *ring;
	static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
					 BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
	int size; /* ring size: different for Tx and Rx */
	int err;
	int i;

	BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
	BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));

	if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) {
		bgmac_err(bgmac, "Core does not report 64-bit DMA\n");
		return -ENOTSUPP;
	}

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];
		ring->num_slots = BGMAC_TX_RING_SLOTS;
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n",
				  ring->mmio_base);
			goto err_dma_free;
		}
		if (ring->dma_base & 0xC0000000)
			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_TX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* No need to alloc TX slots yet */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];
		ring->num_slots = BGMAC_RX_RING_SLOTS;
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n",
				  ring->mmio_base);
			err = -ENOMEM;
			goto err_dma_free;
		}
		if (ring->dma_base & 0xC0000000)
			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_RX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* Alloc RX slots */
		for (j = 0; j < ring->num_slots; j++) {
			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
			if (err) {
				bgmac_err(bgmac, "Can't allocate skb for slot in RX ring\n");
				goto err_dma_free;
			}
		}
	}

	return 0;

err_dma_free:
	bgmac_dma_free(bgmac);
	return -ENOMEM;
}

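/* Program ring base addresses into the hardware, enable the DMA engines and
 * set up all RX descriptors. When a ring uses unaligned addressing, the
 * engine is enabled only after its base address has been written.
 */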
static void bgmac_dma_init(struct bgmac *bgmac)
{
	struct bgmac_dma_ring *ring;
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;	/* Points to the slot that should *not* be read */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);

		for (j = 0; j < ring->num_slots; j++)
			bgmac_dma_rx_setup_desc(bgmac, ring, j);

		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
			    ring->index_base +
			    ring->num_slots * sizeof(struct bgmac_dma_desc));

		ring->start = 0;
		ring->end = 0;
	}
}

/**************************************************
 * PHY ops
 **************************************************/

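/* Read a PHY register over MDIO. On BCM4706 the access goes through the GMAC
 * common core; on other chips the MAC's own PHY access registers are used.
 */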
static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
{
	struct bcma_device *core;
	u16 phy_access_addr;
	u16 phy_ctl_addr;
	u32 tmp;

	BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK);
	BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK);
	BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT);
	BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK);
	BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT);
	BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE);
	BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START);
	BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK);
	BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK);
	BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
	BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
		core = bgmac->core->bus->drv_gmac_cmn.core;
		phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
		phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
	} else {
		core = bgmac->core;
		phy_access_addr = BGMAC_PHY_ACCESS;
		phy_ctl_addr = BGMAC_PHY_CNTL;
	}

	tmp = bcma_read32(core, phy_ctl_addr);
	tmp &= ~BGMAC_PC_EPA_MASK;
	tmp |= phyaddr;
	bcma_write32(core, phy_ctl_addr, tmp);

	tmp = BGMAC_PA_START;
	tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
	tmp |= reg << BGMAC_PA_REG_SHIFT;
	bcma_write32(core, phy_access_addr, tmp);

	if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
		bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n",
			  phyaddr, reg);
		return 0xffff;
	}

	return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK;
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
{
	struct bcma_device *core;
	u16 phy_access_addr;
	u16 phy_ctl_addr;
	u32 tmp;

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
		core = bgmac->core->bus->drv_gmac_cmn.core;
		phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
		phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
	} else {
		core = bgmac->core;
		phy_access_addr = BGMAC_PHY_ACCESS;
		phy_ctl_addr = BGMAC_PHY_CNTL;
	}

	tmp = bcma_read32(core, phy_ctl_addr);
	tmp &= ~BGMAC_PC_EPA_MASK;
	tmp |= phyaddr;
	bcma_write32(core, phy_ctl_addr, tmp);

	bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
		bgmac_warn(bgmac, "Error setting MDIO int\n");

	tmp = BGMAC_PA_START;
	tmp |= BGMAC_PA_WRITE;
	tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
	tmp |= reg << BGMAC_PA_REG_SHIFT;
	tmp |= value;
	bcma_write32(core, phy_access_addr, tmp);

	if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
		bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n",
			  phyaddr, reg);
		return -ETIMEDOUT;
	}

	return 0;
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
static void bgmac_phy_init(struct bgmac *bgmac)
{
	struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
	struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
	u8 i;

	if (ci->id == BCMA_CHIP_ID_BCM5356) {
		for (i = 0; i < 5; i++) {
			bgmac_phy_write(bgmac, i, 0x1f, 0x008b);
			bgmac_phy_write(bgmac, i, 0x15, 0x0100);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x12, 0x2aaa);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
		}
	}
	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
		bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
		bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
		for (i = 0; i < 5; i++) {
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x16, 0x5284);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
			bgmac_phy_write(bgmac, i, 0x17, 0x0010);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x16, 0x5296);
			bgmac_phy_write(bgmac, i, 0x17, 0x1073);
			bgmac_phy_write(bgmac, i, 0x17, 0x9073);
			bgmac_phy_write(bgmac, i, 0x16, 0x52b6);
			bgmac_phy_write(bgmac, i, 0x17, 0x9273);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
		}
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
static void bgmac_phy_reset(struct bgmac *bgmac)
{
	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
		return;

	bgmac_phy_write(bgmac, bgmac->phyaddr, MII_BMCR, BMCR_RESET);
	udelay(100);
	if (bgmac_phy_read(bgmac, bgmac->phyaddr, MII_BMCR) & BMCR_RESET)
		bgmac_err(bgmac, "PHY reset failed\n");
	bgmac_phy_init(bgmac);
}

/**************************************************
 * Chip ops
 **************************************************/

/* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
 * there is nothing to change? Try it after stabilizing the driver.
 */
static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
				 bool force)
{
	u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	u32 new_val = (cmdcfg & mask) | set;

	bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR);
	udelay(2);

	if (new_val != cmdcfg || force)
		bgmac_write(bgmac, BGMAC_CMDCFG, new_val);

	bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR);
	udelay(2);
}

static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
{
	u32 tmp;

	tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
	tmp = (addr[4] << 8) | addr[5];
	bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
}

static void bgmac_set_rx_mode(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	if (net_dev->flags & IFF_PROMISC)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
}

#if 0 /* We don't use these registers yet */
static void bgmac_chip_stats_update(struct bgmac *bgmac)
{
	int i;

	if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) {
		for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
			bgmac->mib_tx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_TX_GOOD_OCTETS + (i * 4));
		for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
			bgmac->mib_rx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_RX_GOOD_OCTETS + (i * 4));
	}

	/* TODO: what else? how to handle BCM4706? Specs are needed */
}
#endif

static void bgmac_clear_mib(struct bgmac *bgmac)
{
	int i;

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT)
		return;

	bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
	for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
		bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
	for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
		bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
static void bgmac_mac_speed(struct bgmac *bgmac)
{
	u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
	u32 set = 0;

	switch (bgmac->mac_speed) {
	case SPEED_10:
		set |= BGMAC_CMDCFG_ES_10;
		break;
	case SPEED_100:
		set |= BGMAC_CMDCFG_ES_100;
		break;
	case SPEED_1000:
		set |= BGMAC_CMDCFG_ES_1000;
		break;
	default:
		bgmac_err(bgmac, "Unsupported speed: %d\n", bgmac->mac_speed);
	}

	if (bgmac->mac_duplex == DUPLEX_HALF)
		set |= BGMAC_CMDCFG_HD;

	bgmac_cmdcfg_maskset(bgmac, mask, set, true);
}

static void bgmac_miiconfig(struct bgmac *bgmac)
{
	u8 imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
		   BGMAC_DS_MM_SHIFT;
	if (imode == 0 || imode == 1) {
		bgmac->mac_speed = SPEED_100;
		bgmac->mac_duplex = DUPLEX_FULL;
		bgmac_mac_speed(bgmac);
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
static void bgmac_chip_reset(struct bgmac *bgmac)
{
	struct bcma_device *core = bgmac->core;
	struct bcma_bus *bus = core->bus;
	struct bcma_chipinfo *ci = &bus->chipinfo;
	u32 flags = 0;
	u32 iost;
	int i;

	if (bcma_core_is_enabled(core)) {
		if (!bgmac->stats_grabbed) {
			/* bgmac_chip_stats_update(bgmac); */
			bgmac->stats_grabbed = true;
		}

		for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
			bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);

		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
		udelay(1);

		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
			bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);

		/* TODO: Clear software multicast filter list */
	}

	iost = bcma_aread32(core, BCMA_IOST);
	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188))
		iost &= ~BGMAC_BCMA_IOST_ATTACHED;

	if (iost & BGMAC_BCMA_IOST_ATTACHED) {
		flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
		if (!bgmac->has_robosw)
			flags |= BGMAC_BCMA_IOCTL_SW_RESET;
	}

	bcma_core_enable(core, flags);

	if (core->id.rev > 2) {
		bgmac_set(bgmac, BCMA_CLKCTLST,
			  BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
		bgmac_wait_value(bgmac->core, BCMA_CLKCTLST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 1000);
	}

	if (ci->id == BCMA_CHIP_ID_BCM5357 ||
	    ci->id == BCMA_CHIP_ID_BCM4749 ||
	    ci->id == BCMA_CHIP_ID_BCM53572) {
		struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
		u8 et_swtype = 0;
		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
					  buf);
			et_swtype &= 0x0f;
			et_swtype <<= 4;
			sw_type = et_swtype;
		} else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358) {
			sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
		} else if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
			   (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
			   (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
		}
		bcma_chipco_chipctl_maskset(cc, 1,
					    ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
					      BGMAC_CHIPCTL_1_SW_TYPE_MASK),
					    sw_type);
	}

	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
		bcma_awrite32(core, BCMA_IOCTL,
			      bcma_aread32(core, BCMA_IOCTL) &
			      ~BGMAC_BCMA_IOCTL_SW_RESET);

	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
	 * Specs don't mention using BGMAC_CMDCFG_SR, but in this routine
	 * BGMAC_CMDCFG is read _after_ putting the chip in reset, so the bit
	 * has to be kept until the MAC is taken out of reset.
	 */
	bgmac_cmdcfg_maskset(bgmac,
			     ~(BGMAC_CMDCFG_TE |
			       BGMAC_CMDCFG_RE |
			       BGMAC_CMDCFG_RPI |
			       BGMAC_CMDCFG_TAI |
			       BGMAC_CMDCFG_HD |
			       BGMAC_CMDCFG_ML |
			       BGMAC_CMDCFG_CFE |
			       BGMAC_CMDCFG_RL |
			       BGMAC_CMDCFG_RED |
			       BGMAC_CMDCFG_PE |
			       BGMAC_CMDCFG_TPI |
			       BGMAC_CMDCFG_PAD_EN |
			       BGMAC_CMDCFG_PF),
			     BGMAC_CMDCFG_PROM |
			     BGMAC_CMDCFG_NLC |
			     BGMAC_CMDCFG_CFE |
			     BGMAC_CMDCFG_SR,
			     false);
	bgmac->mac_speed = SPEED_UNKNOWN;
	bgmac->mac_duplex = DUPLEX_UNKNOWN;

	bgmac_clear_mib(bgmac);
	if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
		bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0,
			       BCMA_GMAC_CMN_PC_MTE);
	else
		bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
	bgmac_miiconfig(bgmac);
	bgmac_phy_init(bgmac);

	netdev_reset_queue(bgmac->net_dev);

	bgmac->int_status = 0;
}

static void bgmac_chip_intrs_on(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
}

static void bgmac_chip_intrs_off(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, 0);
	bgmac_read(bgmac, BGMAC_INT_MASK);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
static void bgmac_enable(struct bgmac *bgmac)
{
	struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
	u32 cmdcfg;
	u32 mode;
	u32 rxq_ctl;
	u32 fl_ctl;
	u16 bp_clk;
	u8 mdp;

	cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
			     BGMAC_CMDCFG_SR, true);
	udelay(2);
	cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
	bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);

	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
		BGMAC_DS_MM_SHIFT;
	if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0)
		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
	if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2)
		bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0,
					    BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);

	switch (ci->id) {
	case BCMA_CHIP_ID_BCM5357:
	case BCMA_CHIP_ID_BCM4749:
	case BCMA_CHIP_ID_BCM53572:
	case BCMA_CHIP_ID_BCM4716:
	case BCMA_CHIP_ID_BCM47162:
		fl_ctl = 0x03cb04cb;
		if (ci->id == BCMA_CHIP_ID_BCM5357 ||
		    ci->id == BCMA_CHIP_ID_BCM4749 ||
		    ci->id == BCMA_CHIP_ID_BCM53572)
			fl_ctl = 0x2300e1;
		bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
		bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
		break;
	}

	rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
	rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
	bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / 1000000;
	mdp = (bp_clk * 128 / 1000) - 3;
	rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
	bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
{
	struct bgmac_dma_ring *ring;
	int i;

	/* 1 interrupt per received frame */
	bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);

	/* Enable 802.3x tx flow control (honor received PAUSE frames) */
	bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);

	bgmac_set_rx_mode(bgmac->net_dev);

	bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);

	if (bgmac->loopback)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);

	bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);

	if (full_init) {
		bgmac_dma_init(bgmac);
		if (1) /* FIXME: is there any case we don't want IRQs? */
			bgmac_chip_intrs_on(bgmac);
	} else {
		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
			ring = &bgmac->rx_ring[i];
			bgmac_dma_rx_enable(bgmac, ring);
		}
	}

	bgmac_enable(bgmac);
}

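/* IRQ handler: acknowledge and mask the reported interrupts, remember them in
 * ->int_status and defer the real work to NAPI.
 */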
static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
{
	struct bgmac *bgmac = netdev_priv(dev_id);

	u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
	int_status &= bgmac->int_mask;

	if (!int_status)
		return IRQ_NONE;

	/* Ack */
	bgmac_write(bgmac, BGMAC_INT_STATUS, int_status);

	/* Disable new interrupts until handling existing ones */
	bgmac_chip_intrs_off(bgmac);

	bgmac->int_status = int_status;

	napi_schedule(&bgmac->napi);

	return IRQ_HANDLED;
}

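/* NAPI poll handler: reclaim transmitted buffers, receive up to @weight
 * packets, complete NAPI if the budget was not exhausted and re-enable the
 * chip interrupts before returning.
 */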
static int bgmac_poll(struct napi_struct *napi, int weight)
{
	struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
	struct bgmac_dma_ring *ring;
	int handled = 0;

	if (bgmac->int_status & BGMAC_IS_TX0) {
		ring = &bgmac->tx_ring[0];
		bgmac_dma_tx_free(bgmac, ring);
		bgmac->int_status &= ~BGMAC_IS_TX0;
	}

	if (bgmac->int_status & BGMAC_IS_RX) {
		ring = &bgmac->rx_ring[0];
		handled += bgmac_dma_rx_read(bgmac, ring, weight);
		bgmac->int_status &= ~BGMAC_IS_RX;
	}

	if (bgmac->int_status) {
		bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", bgmac->int_status);
		bgmac->int_status = 0;
	}

	if (handled < weight)
		napi_complete(napi);

	bgmac_chip_intrs_on(bgmac);

	return handled;
}

/**************************************************
 * net_device_ops
 **************************************************/

static int bgmac_open(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int err = 0;

	bgmac_chip_reset(bgmac);
	/* Specs mention reclaiming rings here, but we do that in DMA init */
	bgmac_chip_init(bgmac, true);

	err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED,
			  KBUILD_MODNAME, net_dev);
	if (err < 0) {
		bgmac_err(bgmac, "IRQ request error: %d!\n", err);
		goto err_out;
	}
	napi_enable(&bgmac->napi);

	phy_start(bgmac->phy_dev);

	netif_carrier_on(net_dev);

err_out:
	return err;
}

static int bgmac_stop(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	netif_carrier_off(net_dev);

	phy_stop(bgmac->phy_dev);

	napi_disable(&bgmac->napi);
	bgmac_chip_intrs_off(bgmac);
	free_irq(bgmac->core->irq, net_dev);

	bgmac_chip_reset(bgmac);

	return 0;
}

static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
				    struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct bgmac_dma_ring *ring;

	/* No QOS support yet */
	ring = &bgmac->tx_ring[0];
	return bgmac_dma_tx_add(bgmac, ring, skb);
}

static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int ret;

	ret = eth_prepare_mac_addr_change(net_dev, addr);
	if (ret < 0)
		return ret;
	bgmac_write_mac_address(bgmac, (u8 *)addr);
	eth_commit_mac_addr_change(net_dev, addr);
	return 0;
}

static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	if (!netif_running(net_dev))
		return -EINVAL;

	return phy_mii_ioctl(bgmac->phy_dev, ifr, cmd);
}

static const struct net_device_ops bgmac_netdev_ops = {
	.ndo_open		= bgmac_open,
	.ndo_stop		= bgmac_stop,
	.ndo_start_xmit		= bgmac_start_xmit,
	.ndo_set_rx_mode	= bgmac_set_rx_mode,
	.ndo_set_mac_address	= bgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bgmac_ioctl,
};

/**************************************************
 * ethtool_ops
 **************************************************/

static int bgmac_get_settings(struct net_device *net_dev,
			      struct ethtool_cmd *cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	return phy_ethtool_gset(bgmac->phy_dev, cmd);
}

static int bgmac_set_settings(struct net_device *net_dev,
			      struct ethtool_cmd *cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	return phy_ethtool_sset(bgmac->phy_dev, cmd);
}

static void bgmac_get_drvinfo(struct net_device *net_dev,
			      struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info));
}

static const struct ethtool_ops bgmac_ethtool_ops = {
	.get_settings		= bgmac_get_settings,
	.set_settings		= bgmac_set_settings,
	.get_drvinfo		= bgmac_get_drvinfo,
};

/**************************************************
 * MII
 **************************************************/

static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return bgmac_phy_read(bus->priv, mii_id, regnum);
}

static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	return bgmac_phy_write(bus->priv, mii_id, regnum, value);
}

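/* PHY link-change callback: when the negotiated speed or duplex differs from
 * what the MAC is currently configured for, reprogram the MAC and log the new
 * link state.
 */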
static void bgmac_adjust_link(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct phy_device *phy_dev = bgmac->phy_dev;
	bool update = false;

	if (phy_dev->link) {
		if (phy_dev->speed != bgmac->mac_speed) {
			bgmac->mac_speed = phy_dev->speed;
			update = true;
		}

		if (phy_dev->duplex != bgmac->mac_duplex) {
			bgmac->mac_duplex = phy_dev->duplex;
			update = true;
		}
	}

	if (update) {
		bgmac_mac_speed(bgmac);
		phy_print_status(phy_dev);
	}
}

static int bgmac_mii_register(struct bgmac *bgmac)
{
	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	char bus_id[MII_BUS_ID_SIZE + 3];
	int i, err = 0;

	mii_bus = mdiobus_alloc();
	if (!mii_bus)
		return -ENOMEM;

	mii_bus->name = "bgmac mii bus";
	sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num,
		bgmac->core->core_unit);
	mii_bus->priv = bgmac;
	mii_bus->read = bgmac_mii_read;
	mii_bus->write = bgmac_mii_write;
	mii_bus->parent = &bgmac->core->dev;
	mii_bus->phy_mask = ~(1 << bgmac->phyaddr);

	mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
	if (!mii_bus->irq) {
		err = -ENOMEM;
		goto err_free_bus;
	}
	for (i = 0; i < PHY_MAX_ADDR; i++)
		mii_bus->irq[i] = PHY_POLL;

	err = mdiobus_register(mii_bus);
	if (err) {
		bgmac_err(bgmac, "Registration of mii bus failed\n");
		goto err_free_irq;
	}

	bgmac->mii_bus = mii_bus;

	/* Connect to the PHY */
	snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
		 bgmac->phyaddr);
	phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
			      PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phy_dev)) {
		bgmac_err(bgmac, "PHY connection failed\n");
		err = PTR_ERR(phy_dev);
		goto err_unregister_bus;
	}
	bgmac->phy_dev = phy_dev;

	return err;

err_unregister_bus:
	mdiobus_unregister(mii_bus);
err_free_irq:
	kfree(mii_bus->irq);
err_free_bus:
	mdiobus_free(mii_bus);
	return err;
}

static void bgmac_mii_unregister(struct bgmac *bgmac)
{
	struct mii_bus *mii_bus = bgmac->mii_bus;

	mdiobus_unregister(mii_bus);
	kfree(mii_bus->irq);
	mdiobus_free(mii_bus);
}

/**************************************************
 * BCMA bus ops
 **************************************************/

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
static int bgmac_probe(struct bcma_device *core)
{
	struct net_device *net_dev;
	struct bgmac *bgmac;
	struct ssb_sprom *sprom = &core->bus->sprom;
	u8 *mac = core->core_unit ? sprom->et1mac : sprom->et0mac;
	int err;

	/* We don't support 2nd, 3rd, ... units, SPROM has to be adjusted */
	if (core->core_unit > 1) {
		pr_err("Unsupported core_unit %d\n", core->core_unit);
		return -ENOTSUPP;
	}

	if (!is_valid_ether_addr(mac)) {
		dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac);
		eth_random_addr(mac);
		dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
	}

	/* Allocation and references */
	net_dev = alloc_etherdev(sizeof(*bgmac));
	if (!net_dev)
		return -ENOMEM;
	net_dev->netdev_ops = &bgmac_netdev_ops;
	net_dev->irq = core->irq;
	SET_ETHTOOL_OPS(net_dev, &bgmac_ethtool_ops);
	bgmac = netdev_priv(net_dev);
	bgmac->net_dev = net_dev;
	bgmac->core = core;
	bcma_set_drvdata(core, bgmac);

	/* Defaults */
	memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);

	/* On BCM4706 we need common core to access PHY */
	if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
	    !core->bus->drv_gmac_cmn.core) {
		bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n");
		err = -ENODEV;
		goto err_netdev_free;
	}
	bgmac->cmn = core->bus->drv_gmac_cmn.core;

	bgmac->phyaddr = core->core_unit ? sprom->et1phyaddr :
			 sprom->et0phyaddr;
	bgmac->phyaddr &= BGMAC_PHY_MASK;
	if (bgmac->phyaddr == BGMAC_PHY_MASK) {
		bgmac_err(bgmac, "No PHY found\n");
		err = -ENODEV;
		goto err_netdev_free;
	}
	bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr,
		   bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");

	if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
		bgmac_err(bgmac, "PCI setup not implemented\n");
		err = -ENOTSUPP;
		goto err_netdev_free;
	}

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_alloc(bgmac);
	if (err) {
		bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
		goto err_netdev_free;
	}

	bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
	if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
		bgmac->int_mask &= ~BGMAC_IS_TX_MASK;

	/* TODO: reset the external phy. Specs are needed */
	bgmac_phy_reset(bgmac);

	bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
			       BGMAC_BFL_ENETROBO);
	if (bgmac->has_robosw)
		bgmac_warn(bgmac, "Support for Roboswitch not implemented\n");

	if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
		bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");

	err = bgmac_mii_register(bgmac);
	if (err) {
		bgmac_err(bgmac, "Cannot register MDIO\n");
		err = -ENOTSUPP;
		goto err_dma_free;
	}

	err = register_netdev(bgmac->net_dev);
	if (err) {
		bgmac_err(bgmac, "Cannot register net device\n");
		err = -ENOTSUPP;
		goto err_mii_unregister;
	}

	netif_carrier_off(net_dev);

	netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);

	return 0;

err_mii_unregister:
	bgmac_mii_unregister(bgmac);
err_dma_free:
	bgmac_dma_free(bgmac);

err_netdev_free:
	bcma_set_drvdata(core, NULL);
	free_netdev(net_dev);

	return err;
}

static void bgmac_remove(struct bcma_device *core)
{
	struct bgmac *bgmac = bcma_get_drvdata(core);

	netif_napi_del(&bgmac->napi);
	unregister_netdev(bgmac->net_dev);
	bgmac_mii_unregister(bgmac);
	bgmac_dma_free(bgmac);
	bcma_set_drvdata(core, NULL);
	free_netdev(bgmac->net_dev);
}

static struct bcma_driver bgmac_bcma_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= bgmac_bcma_tbl,
	.probe		= bgmac_probe,
	.remove		= bgmac_remove,
};

static int __init bgmac_init(void)
{
	int err;

	err = bcma_driver_register(&bgmac_bcma_driver);
	if (err)
		return err;
	pr_info("Broadcom 47xx GBit MAC driver loaded\n");

	return 0;
}

static void __exit bgmac_exit(void)
{
	bcma_driver_unregister(&bgmac_bcma_driver);
}

module_init(bgmac_init)
module_exit(bgmac_exit)

MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");