/*
 * Driver for (BCM4706)? GBit MAC core on BCMA bus.
 *
 * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bgmac.h"

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <bcm47xx_nvram.h>

static const struct bcma_device_id bgmac_bcma_tbl[] = {
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
	BCMA_CORETABLE_END
};
MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);

static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask,
			     u32 value, int timeout)
{
	u32 val;
	int i;

	for (i = 0; i < timeout / 10; i++) {
		val = bcma_read32(core, reg);
		if ((val & mask) == value)
			return true;
		udelay(10);
	}
	pr_err("Timeout waiting for reg 0x%X\n", reg);
	return false;
}

/**************************************************
 * DMA
 **************************************************/

static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	u32 val;
	int i;

	if (!ring->mmio_base)
		return;

	/* Suspend DMA TX ring first.
	 * bgmac_wait_value doesn't support waiting for any of a few values,
	 * so implement the whole loop here.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
		    BGMAC_DMA_TX_SUSPEND);
	for (i = 0; i < 10000 / 10; i++) {
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		val &= BGMAC_DMA_TX_STAT;
		if (val == BGMAC_DMA_TX_STAT_DISABLED ||
		    val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
		    val == BGMAC_DMA_TX_STAT_STOPPED) {
			i = 0;
			break;
		}
		udelay(10);
	}
	if (i)
		bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
			  ring->mmio_base, val);

	/* Remove SUSPEND bit */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
	if (!bgmac_wait_value(bgmac->core,
			      ring->mmio_base + BGMAC_DMA_TX_STATUS,
			      BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
			      10000)) {
		bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
			   ring->mmio_base);
		udelay(300);
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
			bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n",
				  ring->mmio_base);
	}
}

static void bgmac_dma_tx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
	ctl |= BGMAC_DMA_TX_ENABLE;
	ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
}

static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	struct bgmac_dma_desc *dma_desc;
	struct bgmac_slot_info *slot;
	u32 ctl0, ctl1;
	int free_slots;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
		goto err_stop_drop;
	}

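	/* ring->start is the oldest slot not reclaimed yet and ring->end is
	 * the next slot to be filled, so their difference (modulo the ring
	 * size) gives the free space. One slot is always kept unused so a
	 * full ring can be told apart from an empty one.
	 */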
	if (ring->start <= ring->end)
		free_slots = ring->start - ring->end + BGMAC_TX_RING_SLOTS;
	else
		free_slots = ring->start - ring->end;
	if (free_slots == 1) {
		bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot = &ring->slots[ring->end];
	slot->skb = skb;
	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb->len,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, slot->dma_addr)) {
		bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
			  ring->mmio_base);
		goto err_stop_drop;
	}

	ctl0 = BGMAC_DESC_CTL0_IOC | BGMAC_DESC_CTL0_SOF | BGMAC_DESC_CTL0_EOF;
	if (ring->end == ring->num_slots - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 = skb->len & BGMAC_DESC_CTL1_LEN;

	dma_desc = ring->cpu_base;
	dma_desc += ring->end;
	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);

	netdev_sent_queue(net_dev, skb->len);

	wmb();

	/* Advance ring->end to point to an empty slot. We tell the hardware
	 * the first slot it should *not* read.
	 */
	if (++ring->end >= BGMAC_TX_RING_SLOTS)
		ring->end = 0;
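	/* The index register takes a byte offset into the descriptor ring,
	 * hence the multiplication by the descriptor size; for unaligned
	 * rings it is additionally biased by index_base (the low 32 bits of
	 * the ring's DMA base address).
	 */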
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
		    ring->end * sizeof(struct bgmac_dma_desc));

	/* Always keep one slot free to allow detecting bugged calls. */
	if (--free_slots == 1)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

err_stop_drop:
	netif_stop_queue(net_dev);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* Free transmitted packets */
static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	int empty_slot;
	bool freed = false;
	unsigned bytes_compl = 0, pkts_compl = 0;

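	/* The TX status register reports a byte offset (relative to
	 * index_base) of the descriptor the hardware will process next;
	 * dividing by the descriptor size converts it into a slot index.
	 */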
	/* The last slot that hardware didn't consume yet */
	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot -= ring->index_base;
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != empty_slot) {
		struct bgmac_slot_info *slot = &ring->slots[ring->start];

		if (slot->skb) {
			/* Unmap no longer used buffer */
			dma_unmap_single(dma_dev, slot->dma_addr,
					 slot->skb->len, DMA_TO_DEVICE);
			slot->dma_addr = 0;

			bytes_compl += slot->skb->len;
			pkts_compl++;

			/* Free memory! :) */
			dev_kfree_skb(slot->skb);
			slot->skb = NULL;
		} else {
			bgmac_err(bgmac, "Hardware reported transmission for empty TX ring slot %d! End of ring: %d\n",
				  ring->start, ring->end);
		}

		if (++ring->start >= BGMAC_TX_RING_SLOTS)
			ring->start = 0;
		freed = true;
	}

	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);

	if (freed && netif_queue_stopped(bgmac->net_dev))
		netif_wake_queue(bgmac->net_dev);
}

static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	if (!ring->mmio_base)
		return;

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
	if (!bgmac_wait_value(bgmac->core,
			      ring->mmio_base + BGMAC_DMA_RX_STATUS,
			      BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
			      10000))
		bgmac_err(bgmac, "Reset of ring 0x%X RX failed\n",
			  ring->mmio_base);
}

static void bgmac_dma_rx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
	ctl |= BGMAC_DMA_RX_ENABLE;
	ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
	ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
	ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
}

static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
				     struct bgmac_slot_info *slot)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct bgmac_rx_header *rx;

	/* Alloc skb */
	slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
	if (!slot->skb)
		return -ENOMEM;

	/* Poison - if everything goes fine, hardware will overwrite it */
	rx = (struct bgmac_rx_header *)slot->skb->data;
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);

	/* Map skb for the DMA */
	slot->dma_addr = dma_map_single(dma_dev, slot->skb->data,
					BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, slot->dma_addr)) {
		bgmac_err(bgmac, "DMA mapping error\n");
		return -ENOMEM;
	}
	if (slot->dma_addr & 0xC0000000)
		bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");

	return 0;
}

static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring, int desc_idx)
{
	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
	u32 ctl0 = 0, ctl1 = 0;

	if (desc_idx == ring->num_slots - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
	/* Is there any BGMAC device that requires extension? */
	/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
	 * B43_DMA64_DCTL1_ADDREXT_MASK;
	 */

	dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);
}

static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

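	/* As on the TX side, the RX status register holds a byte offset
	 * (relative to index_base) of the first descriptor the hardware has
	 * not filled yet; convert it into a slot index.
	 */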
	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	ring->end = end_slot;

	while (ring->start != ring->end) {
		struct device *dma_dev = bgmac->core->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct sk_buff *skb = slot->skb;
		struct bgmac_rx_header *rx;
		u16 len, flags;

		/* Unmap buffer to make it accessible to the CPU */
		dma_sync_single_for_cpu(dma_dev, slot->dma_addr,
					BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

		/* Get info from the header */
		rx = (struct bgmac_rx_header *)skb->data;
		len = le16_to_cpu(rx->len);
		flags = le16_to_cpu(rx->flags);

		do {
			dma_addr_t old_dma_addr = slot->dma_addr;
			int err;

			/* Check for poison and drop or pass the packet */
			if (len == 0xdead && flags == 0xbeef) {
				bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
					  ring->start);
				dma_sync_single_for_device(dma_dev,
							   slot->dma_addr,
							   BGMAC_RX_BUF_SIZE,
							   DMA_FROM_DEVICE);
				break;
			}

			/* Omit CRC. */
			len -= ETH_FCS_LEN;

			/* Prepare new skb as replacement */
			err = bgmac_dma_rx_skb_for_slot(bgmac, slot);
			if (err) {
				/* Poison the old skb */
				rx->len = cpu_to_le16(0xdead);
				rx->flags = cpu_to_le16(0xbeef);

				dma_sync_single_for_device(dma_dev,
							   slot->dma_addr,
							   BGMAC_RX_BUF_SIZE,
							   DMA_FROM_DEVICE);
				break;
			}
			bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

			/* Unmap old skb, we'll pass it to the network stack */
			dma_unmap_single(dma_dev, old_dma_addr,
					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

			skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET);

			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
			netif_receive_skb(skb);
			handled++;
		} while (0);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	return handled;
}

/* Does ring support unaligned addressing? */
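/* Probe by writing a value with low bits set to the ring-base-low register:
 * cores restricted to aligned descriptor rings presumably don't latch those
 * bits, so reading a non-zero value back means unaligned ring bases work.
 */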
static bool bgmac_dma_unaligned(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring,
				enum bgmac_dma_ring_type ring_type)
{
	switch (ring_type) {
	case BGMAC_DMA_RING_TX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
			return true;
		break;
	case BGMAC_DMA_RING_RX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
			return true;
		break;
	}
	return false;
}

static void bgmac_dma_ring_free(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct bgmac_slot_info *slot;
	int size;
	int i;

	for (i = 0; i < ring->num_slots; i++) {
		slot = &ring->slots[i];
		if (slot->skb) {
			if (slot->dma_addr)
				dma_unmap_single(dma_dev, slot->dma_addr,
						 slot->skb->len, DMA_TO_DEVICE);
			dev_kfree_skb(slot->skb);
		}
	}

	if (ring->cpu_base) {
		/* Free ring of descriptors */
		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
		dma_free_coherent(dma_dev, size, ring->cpu_base,
				  ring->dma_base);
	}
}

static void bgmac_dma_free(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_ring_free(bgmac, &bgmac->tx_ring[i]);
	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_ring_free(bgmac, &bgmac->rx_ring[i]);
}

static int bgmac_dma_alloc(struct bgmac *bgmac)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct bgmac_dma_ring *ring;
	static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
					 BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
	int size; /* ring size: different for Tx and Rx */
	int err;
	int i;

	BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
	BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));

	if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) {
		bgmac_err(bgmac, "Core does not report 64-bit DMA\n");
		return -ENOTSUPP;
	}

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];
		ring->num_slots = BGMAC_TX_RING_SLOTS;
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n",
				  ring->mmio_base);
			goto err_dma_free;
		}
		if (ring->dma_base & 0xC0000000)
			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_TX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* No need to alloc TX slots yet */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];
		ring->num_slots = BGMAC_RX_RING_SLOTS;
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n",
				  ring->mmio_base);
			err = -ENOMEM;
			goto err_dma_free;
		}
		if (ring->dma_base & 0xC0000000)
			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_RX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* Alloc RX slots */
		for (j = 0; j < ring->num_slots; j++) {
			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
			if (err) {
				bgmac_err(bgmac, "Can't allocate skb for slot in RX ring\n");
				goto err_dma_free;
			}
		}
	}

	return 0;

err_dma_free:
	bgmac_dma_free(bgmac);
	return -ENOMEM;
}

static void bgmac_dma_init(struct bgmac *bgmac)
{
	struct bgmac_dma_ring *ring;
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;	/* Points to the slot that should *not* be read */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);

		for (j = 0; j < ring->num_slots; j++)
			bgmac_dma_rx_setup_desc(bgmac, ring, j);

		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
			    ring->index_base +
			    ring->num_slots * sizeof(struct bgmac_dma_desc));

		ring->start = 0;
		ring->end = 0;
	}
}

/**************************************************
 * PHY ops
 **************************************************/

static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
{
	struct bcma_device *core;
	u16 phy_access_addr;
	u16 phy_ctl_addr;
	u32 tmp;

	BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK);
	BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK);
	BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT);
	BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK);
	BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT);
	BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE);
	BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START);
	BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK);
	BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK);
	BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
	BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
		core = bgmac->core->bus->drv_gmac_cmn.core;
		phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
		phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
	} else {
		core = bgmac->core;
		phy_access_addr = BGMAC_PHY_ACCESS;
		phy_ctl_addr = BGMAC_PHY_CNTL;
	}

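	/* Select the PHY in the control register, then kick off the access
	 * and poll until the hardware clears the START bit.
	 */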
	tmp = bcma_read32(core, phy_ctl_addr);
	tmp &= ~BGMAC_PC_EPA_MASK;
	tmp |= phyaddr;
	bcma_write32(core, phy_ctl_addr, tmp);

	tmp = BGMAC_PA_START;
	tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
	tmp |= reg << BGMAC_PA_REG_SHIFT;
	bcma_write32(core, phy_access_addr, tmp);

	if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
		bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n",
			  phyaddr, reg);
		return 0xffff;
	}

	return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK;
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
{
	struct bcma_device *core;
	u16 phy_access_addr;
	u16 phy_ctl_addr;
	u32 tmp;

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
		core = bgmac->core->bus->drv_gmac_cmn.core;
		phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
		phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
	} else {
		core = bgmac->core;
		phy_access_addr = BGMAC_PHY_ACCESS;
		phy_ctl_addr = BGMAC_PHY_CNTL;
	}

	tmp = bcma_read32(core, phy_ctl_addr);
	tmp &= ~BGMAC_PC_EPA_MASK;
	tmp |= phyaddr;
	bcma_write32(core, phy_ctl_addr, tmp);

	bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
		bgmac_warn(bgmac, "Error setting MDIO int\n");

	tmp = BGMAC_PA_START;
	tmp |= BGMAC_PA_WRITE;
	tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
	tmp |= reg << BGMAC_PA_REG_SHIFT;
	tmp |= value;
	bcma_write32(core, phy_access_addr, tmp);

	if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
		bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n",
			  phyaddr, reg);
		return -ETIMEDOUT;
	}

	return 0;
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyforce */
static void bgmac_phy_force(struct bgmac *bgmac)
{
	u16 ctl;
	u16 mask = ~(BGMAC_PHY_CTL_SPEED | BGMAC_PHY_CTL_SPEED_MSB |
		     BGMAC_PHY_CTL_ANENAB | BGMAC_PHY_CTL_DUPLEX);

	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
		return;

	if (bgmac->autoneg)
		return;

	ctl = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL);
	ctl &= mask;
	if (bgmac->full_duplex)
		ctl |= BGMAC_PHY_CTL_DUPLEX;
	if (bgmac->speed == BGMAC_SPEED_100)
		ctl |= BGMAC_PHY_CTL_SPEED_100;
	else if (bgmac->speed == BGMAC_SPEED_1000)
		ctl |= BGMAC_PHY_CTL_SPEED_1000;
	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL, ctl);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyadvertise */
static void bgmac_phy_advertise(struct bgmac *bgmac)
{
	u16 adv;

	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
		return;

	if (!bgmac->autoneg)
		return;

	/* Adv selected 10/100 speeds */
	adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV);
	adv &= ~(BGMAC_PHY_ADV_10HALF | BGMAC_PHY_ADV_10FULL |
		 BGMAC_PHY_ADV_100HALF | BGMAC_PHY_ADV_100FULL);
	if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
		adv |= BGMAC_PHY_ADV_10HALF;
	if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
		adv |= BGMAC_PHY_ADV_100HALF;
	if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
		adv |= BGMAC_PHY_ADV_10FULL;
	if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
		adv |= BGMAC_PHY_ADV_100FULL;
	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV, adv);

	/* Adv selected 1000 speeds */
	adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2);
	adv &= ~(BGMAC_PHY_ADV2_1000HALF | BGMAC_PHY_ADV2_1000FULL);
	if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
		adv |= BGMAC_PHY_ADV2_1000HALF;
	if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
		adv |= BGMAC_PHY_ADV2_1000FULL;
	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2, adv);

	/* Restart */
	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
			bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) |
			BGMAC_PHY_CTL_RESTART);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
static void bgmac_phy_init(struct bgmac *bgmac)
{
	struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
	struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
	u8 i;

	if (ci->id == BCMA_CHIP_ID_BCM5356) {
		for (i = 0; i < 5; i++) {
			bgmac_phy_write(bgmac, i, 0x1f, 0x008b);
			bgmac_phy_write(bgmac, i, 0x15, 0x0100);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x12, 0x2aaa);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
		}
	}
	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
		bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
		bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
		for (i = 0; i < 5; i++) {
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x16, 0x5284);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
			bgmac_phy_write(bgmac, i, 0x17, 0x0010);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x16, 0x5296);
			bgmac_phy_write(bgmac, i, 0x17, 0x1073);
			bgmac_phy_write(bgmac, i, 0x17, 0x9073);
			bgmac_phy_write(bgmac, i, 0x16, 0x52b6);
			bgmac_phy_write(bgmac, i, 0x17, 0x9273);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
		}
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
static void bgmac_phy_reset(struct bgmac *bgmac)
{
	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
		return;

	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
			BGMAC_PHY_CTL_RESET);
	udelay(100);
	if (bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) &
	    BGMAC_PHY_CTL_RESET)
		bgmac_err(bgmac, "PHY reset failed\n");
	bgmac_phy_init(bgmac);
}

/**************************************************
 * Chip ops
 **************************************************/

/* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
 * there is nothing to change? Try it after stabilizing the driver.
 */
static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
				 bool force)
{
	u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	u32 new_val = (cmdcfg & mask) | set;

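	/* Toggle the software reset (SR) bit around the update: set it,
	 * change the config if needed, then take the MAC out of reset again.
	 */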
	bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR);
	udelay(2);

	if (new_val != cmdcfg || force)
		bgmac_write(bgmac, BGMAC_CMDCFG, new_val);

	bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR);
	udelay(2);
}

static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
{
	u32 tmp;

	tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
	tmp = (addr[4] << 8) | addr[5];
	bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
}

static void bgmac_set_rx_mode(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	if (net_dev->flags & IFF_PROMISC)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
}

#if 0 /* We don't use these regs yet */
static void bgmac_chip_stats_update(struct bgmac *bgmac)
{
	int i;

	if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) {
		for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
			bgmac->mib_tx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_TX_GOOD_OCTETS + (i * 4));
		for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
			bgmac->mib_rx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_RX_GOOD_OCTETS + (i * 4));
	}

	/* TODO: what else? how to handle BCM4706? Specs are needed */
}
#endif

static void bgmac_clear_mib(struct bgmac *bgmac)
{
	int i;

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT)
		return;

	bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
	for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
		bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
	for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
		bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
static void bgmac_speed(struct bgmac *bgmac, int speed)
{
	u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
	u32 set = 0;

	if (speed & BGMAC_SPEED_10)
		set |= BGMAC_CMDCFG_ES_10;
	if (speed & BGMAC_SPEED_100)
		set |= BGMAC_CMDCFG_ES_100;
	if (speed & BGMAC_SPEED_1000)
		set |= BGMAC_CMDCFG_ES_1000;
	if (!bgmac->full_duplex)
		set |= BGMAC_CMDCFG_HD;
	bgmac_cmdcfg_maskset(bgmac, mask, set, true);
}

static void bgmac_miiconfig(struct bgmac *bgmac)
{
	u8 imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
			BGMAC_DS_MM_SHIFT;
	if (imode == 0 || imode == 1) {
		if (bgmac->autoneg)
			bgmac_speed(bgmac, BGMAC_SPEED_100);
		else
			bgmac_speed(bgmac, bgmac->speed);
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
static void bgmac_chip_reset(struct bgmac *bgmac)
{
	struct bcma_device *core = bgmac->core;
	struct bcma_bus *bus = core->bus;
	struct bcma_chipinfo *ci = &bus->chipinfo;
	u32 flags = 0;
	u32 iost;
	int i;

	if (bcma_core_is_enabled(core)) {
		if (!bgmac->stats_grabbed) {
			/* bgmac_chip_stats_update(bgmac); */
			bgmac->stats_grabbed = true;
		}

		for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
			bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);

		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
		udelay(1);

		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
			bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);

		/* TODO: Clear software multicast filter list */
	}

	iost = bcma_aread32(core, BCMA_IOST);
	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9))
		iost &= ~BGMAC_BCMA_IOST_ATTACHED;

	if (iost & BGMAC_BCMA_IOST_ATTACHED) {
		flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
		if (!bgmac->has_robosw)
			flags |= BGMAC_BCMA_IOCTL_SW_RESET;
	}

	bcma_core_enable(core, flags);

	if (core->id.rev > 2) {
		bgmac_set(bgmac, BCMA_CLKCTLST, 1 << 8);
		bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, 1 << 24, 1 << 24,
				 1000);
	}

	if (ci->id == BCMA_CHIP_ID_BCM5357 || ci->id == BCMA_CHIP_ID_BCM4749 ||
	    ci->id == BCMA_CHIP_ID_BCM53572) {
		struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
		u8 et_swtype = 0;
		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
		char buf[4];

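		/* The board may override the switch/PHY wiring via the
		 * et_swtype NVRAM variable; otherwise fall back to
		 * chip/package specific defaults.
		 */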
		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
					  buf);
			et_swtype &= 0x0f;
			et_swtype <<= 4;
			sw_type = et_swtype;
		} else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 9) {
			sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
		} else if ((ci->id != BCMA_CHIP_ID_BCM53572 && ci->pkg == 10) ||
			   (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9)) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
		}
		bcma_chipco_chipctl_maskset(cc, 1,
					    ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
					      BGMAC_CHIPCTL_1_SW_TYPE_MASK),
					    sw_type);
	}

	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
		bcma_awrite32(core, BCMA_IOCTL,
			      bcma_aread32(core, BCMA_IOCTL) &
			      ~BGMAC_BCMA_IOCTL_SW_RESET);

	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
	 * The specs don't mention using BGMAC_CMDCFG_SR, but in this routine
	 * BGMAC_CMDCFG is read _after_ putting the chip into reset, so the
	 * bit has to be kept set until the MAC is taken out of reset.
	 */
	bgmac_cmdcfg_maskset(bgmac,
			     ~(BGMAC_CMDCFG_TE |
			       BGMAC_CMDCFG_RE |
			       BGMAC_CMDCFG_RPI |
			       BGMAC_CMDCFG_TAI |
			       BGMAC_CMDCFG_HD |
			       BGMAC_CMDCFG_ML |
			       BGMAC_CMDCFG_CFE |
			       BGMAC_CMDCFG_RL |
			       BGMAC_CMDCFG_RED |
			       BGMAC_CMDCFG_PE |
			       BGMAC_CMDCFG_TPI |
			       BGMAC_CMDCFG_PAD_EN |
			       BGMAC_CMDCFG_PF),
			     BGMAC_CMDCFG_PROM |
			     BGMAC_CMDCFG_NLC |
			     BGMAC_CMDCFG_CFE |
			     BGMAC_CMDCFG_SR,
			     false);

	bgmac_clear_mib(bgmac);
	if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
		bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0,
			       BCMA_GMAC_CMN_PC_MTE);
	else
		bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
	bgmac_miiconfig(bgmac);
	bgmac_phy_init(bgmac);

	netdev_reset_queue(bgmac->net_dev);

	bgmac->int_status = 0;
}

static void bgmac_chip_intrs_on(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
}

static void bgmac_chip_intrs_off(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, 0);
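	/* Read the mask back to flush the posted write, so interrupts are
	 * really masked by the time we return.
	 */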
	bgmac_read(bgmac, BGMAC_INT_MASK);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
static void bgmac_enable(struct bgmac *bgmac)
{
	struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
	u32 cmdcfg;
	u32 mode;
	u32 rxq_ctl;
	u32 fl_ctl;
	u16 bp_clk;
	u8 mdp;

	cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
			     BGMAC_CMDCFG_SR, true);
	udelay(2);
	cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
	bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);

	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
		BGMAC_DS_MM_SHIFT;
	if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0)
		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
	if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2)
		bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0,
					    BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);

	switch (ci->id) {
	case BCMA_CHIP_ID_BCM5357:
	case BCMA_CHIP_ID_BCM4749:
	case BCMA_CHIP_ID_BCM53572:
	case BCMA_CHIP_ID_BCM4716:
	case BCMA_CHIP_ID_BCM47162:
		fl_ctl = 0x03cb04cb;
		if (ci->id == BCMA_CHIP_ID_BCM5357 ||
		    ci->id == BCMA_CHIP_ID_BCM4749 ||
		    ci->id == BCMA_CHIP_ID_BCM53572)
			fl_ctl = 0x2300e1;
		bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
		bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
		break;
	}

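	/* Derive the RX queue MDP value from the backplane clock in MHz; the
	 * formula follows the reference gmac_enable code linked above.
	 */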
	rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
	rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
	bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / 1000000;
	mdp = (bp_clk * 128 / 1000) - 3;
	rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
	bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
{
	struct bgmac_dma_ring *ring;
	int i;

	/* 1 interrupt per received frame */
	bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);

	/* Enable 802.3x tx flow control (honor received PAUSE frames) */
	bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);

	bgmac_set_rx_mode(bgmac->net_dev);

	bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);

	if (bgmac->loopback)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);

	bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);

	if (!bgmac->autoneg) {
		bgmac_speed(bgmac, bgmac->speed);
		bgmac_phy_force(bgmac);
	} else if (bgmac->speed) { /* if there is anything to adv */
		bgmac_phy_advertise(bgmac);
	}

	if (full_init) {
		bgmac_dma_init(bgmac);
		if (1) /* FIXME: is there any case we don't want IRQs? */
			bgmac_chip_intrs_on(bgmac);
	} else {
		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
			ring = &bgmac->rx_ring[i];
			bgmac_dma_rx_enable(bgmac, ring);
		}
	}

	bgmac_enable(bgmac);
}

static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
{
	struct bgmac *bgmac = netdev_priv(dev_id);

	u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
	int_status &= bgmac->int_mask;

	if (!int_status)
		return IRQ_NONE;

	/* Ack */
	bgmac_write(bgmac, BGMAC_INT_STATUS, int_status);

	/* Disable new interrupts until handling existing ones */
	bgmac_chip_intrs_off(bgmac);

	bgmac->int_status = int_status;

	napi_schedule(&bgmac->napi);

	return IRQ_HANDLED;
}

static int bgmac_poll(struct napi_struct *napi, int weight)
{
	struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
	struct bgmac_dma_ring *ring;
	int handled = 0;

	if (bgmac->int_status & BGMAC_IS_TX0) {
		ring = &bgmac->tx_ring[0];
		bgmac_dma_tx_free(bgmac, ring);
		bgmac->int_status &= ~BGMAC_IS_TX0;
	}

	if (bgmac->int_status & BGMAC_IS_RX) {
		ring = &bgmac->rx_ring[0];
		handled += bgmac_dma_rx_read(bgmac, ring, weight);
		bgmac->int_status &= ~BGMAC_IS_RX;
	}

	if (bgmac->int_status) {
		bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", bgmac->int_status);
		bgmac->int_status = 0;
	}

	if (handled < weight)
		napi_complete(napi);

	bgmac_chip_intrs_on(bgmac);

	return handled;
}

/**************************************************
 * net_device_ops
 **************************************************/

static int bgmac_open(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int err = 0;

	bgmac_chip_reset(bgmac);
	/* The specs talk about reclaiming rings here, but we do that in DMA init */
	bgmac_chip_init(bgmac, true);

	err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED,
			  KBUILD_MODNAME, net_dev);
	if (err < 0) {
		bgmac_err(bgmac, "IRQ request error: %d!\n", err);
		goto err_out;
	}
	napi_enable(&bgmac->napi);

	netif_carrier_on(net_dev);

err_out:
	return err;
}

static int bgmac_stop(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	netif_carrier_off(net_dev);

	napi_disable(&bgmac->napi);
	bgmac_chip_intrs_off(bgmac);
	free_irq(bgmac->core->irq, net_dev);

	bgmac_chip_reset(bgmac);

	return 0;
}

static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
				    struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct bgmac_dma_ring *ring;

	/* No QOS support yet */
	ring = &bgmac->tx_ring[0];
	return bgmac_dma_tx_add(bgmac, ring, skb);
}

static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int ret;

	ret = eth_prepare_mac_addr_change(net_dev, addr);
	if (ret < 0)
		return ret;
	bgmac_write_mac_address(bgmac, (u8 *)addr);
	eth_commit_mac_addr_change(net_dev, addr);
	return 0;
}

static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bgmac->phyaddr;
		/* fallthru */
	case SIOCGMIIREG:
		if (!netif_running(net_dev))
			return -EAGAIN;
		data->val_out = bgmac_phy_read(bgmac, data->phy_id,
					       data->reg_num & 0x1f);
		return 0;
	case SIOCSMIIREG:
		if (!netif_running(net_dev))
			return -EAGAIN;
		bgmac_phy_write(bgmac, data->phy_id, data->reg_num & 0x1f,
				data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops bgmac_netdev_ops = {
	.ndo_open		= bgmac_open,
	.ndo_stop		= bgmac_stop,
	.ndo_start_xmit		= bgmac_start_xmit,
	.ndo_set_rx_mode	= bgmac_set_rx_mode,
	.ndo_set_mac_address	= bgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bgmac_ioctl,
};

/**************************************************
 * ethtool_ops
 **************************************************/

static int bgmac_get_settings(struct net_device *net_dev,
			      struct ethtool_cmd *cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	cmd->supported = SUPPORTED_10baseT_Half |
			 SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half |
			 SUPPORTED_100baseT_Full |
			 SUPPORTED_1000baseT_Half |
			 SUPPORTED_1000baseT_Full |
			 SUPPORTED_Autoneg;

	if (bgmac->autoneg) {
		WARN_ON(cmd->advertising);
		if (bgmac->full_duplex) {
			if (bgmac->speed & BGMAC_SPEED_10)
				cmd->advertising |= ADVERTISED_10baseT_Full;
			if (bgmac->speed & BGMAC_SPEED_100)
				cmd->advertising |= ADVERTISED_100baseT_Full;
			if (bgmac->speed & BGMAC_SPEED_1000)
				cmd->advertising |= ADVERTISED_1000baseT_Full;
		} else {
			if (bgmac->speed & BGMAC_SPEED_10)
				cmd->advertising |= ADVERTISED_10baseT_Half;
			if (bgmac->speed & BGMAC_SPEED_100)
				cmd->advertising |= ADVERTISED_100baseT_Half;
			if (bgmac->speed & BGMAC_SPEED_1000)
				cmd->advertising |= ADVERTISED_1000baseT_Half;
		}
	} else {
		switch (bgmac->speed) {
		case BGMAC_SPEED_10:
			ethtool_cmd_speed_set(cmd, SPEED_10);
			break;
		case BGMAC_SPEED_100:
			ethtool_cmd_speed_set(cmd, SPEED_100);
			break;
		case BGMAC_SPEED_1000:
			ethtool_cmd_speed_set(cmd, SPEED_1000);
			break;
		}
	}

	cmd->duplex = bgmac->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;

	cmd->autoneg = bgmac->autoneg;

	return 0;
}

#if 0
static int bgmac_set_settings(struct net_device *net_dev,
			      struct ethtool_cmd *cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	return -1;
}
#endif

static void bgmac_get_drvinfo(struct net_device *net_dev,
			      struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info));
}

static const struct ethtool_ops bgmac_ethtool_ops = {
	.get_settings		= bgmac_get_settings,
	.get_drvinfo		= bgmac_get_drvinfo,
};

/**************************************************
 * MII
 **************************************************/

static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return bgmac_phy_read(bus->priv, mii_id, regnum);
}

static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	return bgmac_phy_write(bus->priv, mii_id, regnum, value);
}

static int bgmac_mii_register(struct bgmac *bgmac)
{
	struct mii_bus *mii_bus;
	int i, err = 0;

	mii_bus = mdiobus_alloc();
	if (!mii_bus)
		return -ENOMEM;

	mii_bus->name = "bgmac mii bus";
	sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num,
		bgmac->core->core_unit);
	mii_bus->priv = bgmac;
	mii_bus->read = bgmac_mii_read;
	mii_bus->write = bgmac_mii_write;
	mii_bus->parent = &bgmac->core->dev;
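	/* Limit the MDIO scan to the single PHY address read from the SPROM */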
	mii_bus->phy_mask = ~(1 << bgmac->phyaddr);

	mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
	if (!mii_bus->irq) {
		err = -ENOMEM;
		goto err_free_bus;
	}
	for (i = 0; i < PHY_MAX_ADDR; i++)
		mii_bus->irq[i] = PHY_POLL;

	err = mdiobus_register(mii_bus);
	if (err) {
		bgmac_err(bgmac, "Registration of mii bus failed\n");
		goto err_free_irq;
	}

	bgmac->mii_bus = mii_bus;

	return err;

err_free_irq:
	kfree(mii_bus->irq);
err_free_bus:
	mdiobus_free(mii_bus);
	return err;
}

static void bgmac_mii_unregister(struct bgmac *bgmac)
{
	struct mii_bus *mii_bus = bgmac->mii_bus;

	mdiobus_unregister(mii_bus);
	kfree(mii_bus->irq);
	mdiobus_free(mii_bus);
}

/**************************************************
 * BCMA bus ops
 **************************************************/

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
static int bgmac_probe(struct bcma_device *core)
{
	struct net_device *net_dev;
	struct bgmac *bgmac;
	struct ssb_sprom *sprom = &core->bus->sprom;
	u8 *mac = core->core_unit ? sprom->et1mac : sprom->et0mac;
	int err;

	/* We don't support 2nd, 3rd, ... units, SPROM has to be adjusted */
	if (core->core_unit > 1) {
		pr_err("Unsupported core_unit %d\n", core->core_unit);
		return -ENOTSUPP;
	}

	if (!is_valid_ether_addr(mac)) {
		dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac);
		eth_random_addr(mac);
		dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
	}

	/* Allocation and references */
	net_dev = alloc_etherdev(sizeof(*bgmac));
	if (!net_dev)
		return -ENOMEM;
	net_dev->netdev_ops = &bgmac_netdev_ops;
	net_dev->irq = core->irq;
	SET_ETHTOOL_OPS(net_dev, &bgmac_ethtool_ops);
	bgmac = netdev_priv(net_dev);
	bgmac->net_dev = net_dev;
	bgmac->core = core;
	bcma_set_drvdata(core, bgmac);

	/* Defaults */
	bgmac->autoneg = true;
	bgmac->full_duplex = true;
	bgmac->speed = BGMAC_SPEED_10 | BGMAC_SPEED_100 | BGMAC_SPEED_1000;
	memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);

	/* On BCM4706 we need common core to access PHY */
	if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
	    !core->bus->drv_gmac_cmn.core) {
		bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n");
		err = -ENODEV;
		goto err_netdev_free;
	}
	bgmac->cmn = core->bus->drv_gmac_cmn.core;

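	/* The SPROM carries separate et0/et1 settings; pick the PHY address
	 * matching this core unit, as was done for the MAC address above.
	 */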
	bgmac->phyaddr = core->core_unit ? sprom->et1phyaddr :
			 sprom->et0phyaddr;
	bgmac->phyaddr &= BGMAC_PHY_MASK;
	if (bgmac->phyaddr == BGMAC_PHY_MASK) {
		bgmac_err(bgmac, "No PHY found\n");
		err = -ENODEV;
		goto err_netdev_free;
	}
	bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr,
		   bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");

	if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
		bgmac_err(bgmac, "PCI setup not implemented\n");
		err = -ENOTSUPP;
		goto err_netdev_free;
	}

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_alloc(bgmac);
	if (err) {
		bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
		goto err_netdev_free;
	}

	bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
	if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
		bgmac->int_mask &= ~BGMAC_IS_TX_MASK;

	/* TODO: reset the external phy. Specs are needed */
	bgmac_phy_reset(bgmac);

	bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
			       BGMAC_BFL_ENETROBO);
	if (bgmac->has_robosw)
		bgmac_warn(bgmac, "Support for Roboswitch not implemented\n");

	if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
		bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");

	err = bgmac_mii_register(bgmac);
	if (err) {
		bgmac_err(bgmac, "Cannot register MDIO\n");
		err = -ENOTSUPP;
		goto err_dma_free;
	}

	err = register_netdev(bgmac->net_dev);
	if (err) {
		bgmac_err(bgmac, "Cannot register net device\n");
		err = -ENOTSUPP;
		goto err_mii_unregister;
	}

	netif_carrier_off(net_dev);

	netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);

	return 0;

err_mii_unregister:
	bgmac_mii_unregister(bgmac);
err_dma_free:
	bgmac_dma_free(bgmac);

err_netdev_free:
	bcma_set_drvdata(core, NULL);
	free_netdev(net_dev);

	return err;
}

static void bgmac_remove(struct bcma_device *core)
{
	struct bgmac *bgmac = bcma_get_drvdata(core);

	netif_napi_del(&bgmac->napi);
	unregister_netdev(bgmac->net_dev);
	bgmac_mii_unregister(bgmac);
	bgmac_dma_free(bgmac);
	bcma_set_drvdata(core, NULL);
	free_netdev(bgmac->net_dev);
}

static struct bcma_driver bgmac_bcma_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= bgmac_bcma_tbl,
	.probe		= bgmac_probe,
	.remove		= bgmac_remove,
};

static int __init bgmac_init(void)
{
	int err;

	err = bcma_driver_register(&bgmac_bcma_driver);
	if (err)
		return err;
	pr_info("Broadcom 47xx GBit MAC driver loaded\n");

	return 0;
}

static void __exit bgmac_exit(void)
{
	bcma_driver_unregister(&bgmac_bcma_driver);
}

module_init(bgmac_init)
module_exit(bgmac_exit)

MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");