/*
 * Driver for (BCM4706)? GBit MAC core on BCMA bus.
 *
 * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bcma/bcma.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/bcm47xx_nvram.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/dsa.h>
#include "bgmac.h"
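
/* Poll @reg every 10 us until the bits selected by @mask read back as
 * @value, or until @timeout (in microseconds) expires.
 */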
static bool bgmac_wait_value(struct bgmac *bgmac, u16 reg, u32 mask,
			     u32 value, int timeout)
{
	u32 val;
	int i;

	for (i = 0; i < timeout / 10; i++) {
		val = bgmac_read(bgmac, reg);
		if ((val & mask) == value)
			return true;
		udelay(10);
	}
	dev_err(bgmac->dev, "Timeout waiting for reg 0x%X\n", reg);
	return false;
}

/**************************************************
 * DMA
 **************************************************/
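
/* Quiesce a TX ring: request a suspend first, wait until the engine reports
 * an idle state, then clear the control register entirely and wait for the
 * DISABLED status.
 */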
static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	u32 val;
	int i;

	if (!ring->mmio_base)
		return;

	/* Suspend DMA TX ring first.
	 * bgmac_wait_value doesn't support waiting for any one of several
	 * values, so implement the whole loop here.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
		    BGMAC_DMA_TX_SUSPEND);
	for (i = 0; i < 10000 / 10; i++) {
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		val &= BGMAC_DMA_TX_STAT;
		if (val == BGMAC_DMA_TX_STAT_DISABLED ||
		    val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
		    val == BGMAC_DMA_TX_STAT_STOPPED) {
			i = 0;
			break;
		}
		udelay(10);
	}
	if (i)
		dev_err(bgmac->dev, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
			ring->mmio_base, val);

	/* Remove SUSPEND bit */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
	if (!bgmac_wait_value(bgmac,
			      ring->mmio_base + BGMAC_DMA_TX_STATUS,
			      BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
			      10000)) {
		dev_warn(bgmac->dev, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
			 ring->mmio_base);
		udelay(300);
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
			dev_err(bgmac->dev, "Reset of DMA TX ring 0x%X failed\n",
				ring->mmio_base);
	}
}

static void bgmac_dma_tx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
	if (bgmac->feature_flags & BGMAC_FEAT_TX_MASK_SETUP) {
		ctl &= ~BGMAC_DMA_TX_BL_MASK;
		ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;

		ctl &= ~BGMAC_DMA_TX_MR_MASK;
		ctl |= BGMAC_DMA_TX_MR_2 << BGMAC_DMA_TX_MR_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PC_MASK;
		ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PT_MASK;
		ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT;
	}
	ctl |= BGMAC_DMA_TX_ENABLE;
	ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
}
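
/* Fill TX descriptor @i for the buffer already mapped at
 * ring->slots[i].dma_addr. The EOT flag on the ring's last slot tells the
 * DMA engine to wrap back to descriptor 0.
 */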
static void
bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
		     int i, int len, u32 ctl0)
{
	struct bgmac_slot_info *slot;
	struct bgmac_dma_desc *dma_desc;
	u32 ctl1;

	if (i == BGMAC_TX_RING_SLOTS - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;

	ctl1 = len & BGMAC_DESC_CTL1_LEN;

	slot = &ring->slots[i];
	dma_desc = &ring->cpu_base[i];
	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);
}

#define ENET_BRCM_TAG_LEN	4
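
/* Map an skb (linear head plus any paged fragments) onto consecutive TX
 * descriptors and kick the hardware. Returns NETDEV_TX_BUSY only if the
 * ring unexpectedly fills up; on mapping errors the skb is dropped, counted
 * and NETDEV_TX_OK is returned.
 */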
static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	int index = ring->end % BGMAC_TX_RING_SLOTS;
	struct bgmac_slot_info *slot = &ring->slots[index];
	int nr_frags;
	u32 flags;
	int i;

	/* The Ethernet switch we are interfaced with needs packets to be at
	 * least 64 bytes (including FCS) otherwise they will be discarded when
	 * they enter the switch port logic. When Broadcom tags are enabled, we
	 * need to make sure that packets are at least 68 bytes
	 * (including FCS and tag) because the length verification is done after
	 * the Broadcom tag is stripped off the ingress packet.
	 */
	if (netdev_uses_dsa(net_dev)) {
		if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN))
			goto err_stats;
	}

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len);
		goto err_drop;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb_checksum_help(skb);

	nr_frags = skb_shinfo(skb)->nr_frags;

	/* ring->start and ring->end are free-running counters, so the
	 * subtraction is modulo 2^32 and yields the number of used slots
	 * even after ring->end overflows.
	 */
	if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) {
		netdev_err(bgmac->net_dev, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
					DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
		goto err_dma_head;

	flags = BGMAC_DESC_CTL0_SOF;
	if (!nr_frags)
		flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

	bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
	flags = 0;

	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);

		index = (index + 1) % BGMAC_TX_RING_SLOTS;
		slot = &ring->slots[index];
		slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0,
						  len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
			goto err_dma;

		if (i == nr_frags - 1)
			flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

		bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags);
	}

	slot->skb = skb;
	ring->end += nr_frags + 1;
	netdev_sent_queue(net_dev, skb->len);

	wmb();

	/* ring->end now points to the first empty slot; tell the hardware
	 * this is the first slot it should *not* read.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
		    (ring->end % BGMAC_TX_RING_SLOTS) *
		    sizeof(struct bgmac_dma_desc));

	if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

err_dma:
	dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
			 DMA_TO_DEVICE);

	while (i-- > 0) {
		int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[index];
		u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
		int len = ctl1 & BGMAC_DESC_CTL1_LEN;

		dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE);
	}

err_dma_head:
	netdev_err(bgmac->net_dev, "Mapping error of skb on ring 0x%X\n",
		   ring->mmio_base);

err_drop:
	dev_kfree_skb(skb);
err_stats:
	net_dev->stats.tx_dropped++;
	net_dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

/* Free transmitted packets */
static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	int empty_slot;
	bool freed = false;
	unsigned bytes_compl = 0, pkts_compl = 0;

	/* The last slot that hardware didn't consume yet */
	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot -= ring->index_base;
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != ring->end) {
		int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[slot_idx];
		u32 ctl0, ctl1;
		int len;

		if (slot_idx == empty_slot)
			break;

		ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
		ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
		len = ctl1 & BGMAC_DESC_CTL1_LEN;
		if (ctl0 & BGMAC_DESC_CTL0_SOF)
			/* Unmap no longer used buffer */
			dma_unmap_single(dma_dev, slot->dma_addr, len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, slot->dma_addr, len,
				       DMA_TO_DEVICE);

		if (slot->skb) {
			bgmac->net_dev->stats.tx_bytes += slot->skb->len;
			bgmac->net_dev->stats.tx_packets++;
			bytes_compl += slot->skb->len;
			pkts_compl++;

			/* Free memory! :) */
			dev_kfree_skb(slot->skb);
			slot->skb = NULL;
		}

		slot->dma_addr = 0;
		ring->start++;
		freed = true;
	}

	if (!pkts_compl)
		return;

	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);

	if (netif_queue_stopped(bgmac->net_dev))
		netif_wake_queue(bgmac->net_dev);
}

static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	if (!ring->mmio_base)
		return;

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
	if (!bgmac_wait_value(bgmac,
			      ring->mmio_base + BGMAC_DMA_RX_STATUS,
			      BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
			      10000))
		dev_err(bgmac->dev, "Reset of ring 0x%X RX failed\n",
			ring->mmio_base);
}

static void bgmac_dma_rx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);

	/* preserve ONLY bits 16-17 from current hardware value */
	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;

	if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) {
		ctl &= ~BGMAC_DMA_RX_BL_MASK;
		ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;

		ctl &= ~BGMAC_DMA_RX_PC_MASK;
		ctl |= BGMAC_DMA_RX_PC_8 << BGMAC_DMA_RX_PC_SHIFT;

		ctl &= ~BGMAC_DMA_RX_PT_MASK;
		ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
	}
	ctl |= BGMAC_DMA_RX_ENABLE;
	ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
	ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
	ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
}
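
/* Allocate a fresh page-fragment buffer for @slot, poison its RX header so
 * a DMA that never happened can be detected later, and map it for the
 * device.
 */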
static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
				     struct bgmac_slot_info *slot)
{
	struct device *dma_dev = bgmac->dma_dev;
	dma_addr_t dma_addr;
	struct bgmac_rx_header *rx;
	void *buf;

	/* Alloc buffer; an skb is built around it later, on receive */
	buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE);
	if (!buf)
		return -ENOMEM;

	/* Poison - if everything goes fine, hardware will overwrite it */
	rx = buf + BGMAC_RX_BUF_OFFSET;
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);

	/* Map the buffer for DMA */
	dma_addr = dma_map_single(dma_dev, buf + BGMAC_RX_BUF_OFFSET,
				  BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, dma_addr)) {
		netdev_err(bgmac->net_dev, "DMA mapping error\n");
		put_page(virt_to_head_page(buf));
		return -ENOMEM;
	}

	/* Update the slot */
	slot->buf = buf;
	slot->dma_addr = dma_addr;

	return 0;
}
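
/* Publish the RX write pointer: dma_wmb() orders the descriptor updates
 * before the index register write that tells the hardware how far it may
 * fill the ring.
 */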
static void bgmac_dma_rx_update_index(struct bgmac *bgmac,
				      struct bgmac_dma_ring *ring)
{
	dma_wmb();

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
		    ring->index_base +
		    ring->end * sizeof(struct bgmac_dma_desc));
}

static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring, int desc_idx)
{
	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
	u32 ctl0 = 0, ctl1 = 0;

	if (desc_idx == BGMAC_RX_RING_SLOTS - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
	/* Is there any BGMAC device that requires extension? */
	/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
	 * B43_DMA64_DCTL1_ADDREXT_MASK;
	 */

	dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);

	ring->end = desc_idx;
}
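
/* Re-poison a buffer that stays on the ring (used when allocating its
 * replacement failed), syncing it to the CPU and back so both sides see a
 * consistent header.
 */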
static void bgmac_dma_rx_poison_buf(struct device *dma_dev,
				    struct bgmac_slot_info *slot)
{
	struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;

	dma_sync_single_for_cpu(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
				DMA_FROM_DEVICE);
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);
	dma_sync_single_for_device(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
				   DMA_FROM_DEVICE);
}
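
/* NAPI RX handler: read the hardware's current descriptor pointer and
 * process up to @weight received frames, giving each consumed slot a fresh
 * buffer before the skb is passed up the stack.
 */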
static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != end_slot) {
		struct device *dma_dev = bgmac->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
		struct sk_buff *skb;
		void *buf = slot->buf;
		dma_addr_t dma_addr = slot->dma_addr;
		u16 len, flags;

		do {
			/* Prepare a new buffer as replacement */
			if (bgmac_dma_rx_skb_for_slot(bgmac, slot)) {
				bgmac_dma_rx_poison_buf(dma_dev, slot);
				break;
			}

			/* Unmap buffer to make it accessible to the CPU */
			dma_unmap_single(dma_dev, dma_addr,
					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

			/* Get info from the header */
			len = le16_to_cpu(rx->len);
			flags = le16_to_cpu(rx->flags);

			/* Check for poison and drop or pass the packet */
			if (len == 0xdead && flags == 0xbeef) {
				netdev_err(bgmac->net_dev, "Found poisoned packet at slot %d, DMA issue!\n",
					   ring->start);
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_errors++;
				break;
			}

			if (len > BGMAC_RX_ALLOC_SIZE) {
				netdev_err(bgmac->net_dev, "Found oversized packet at slot %d, DMA issue!\n",
					   ring->start);
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_length_errors++;
				bgmac->net_dev->stats.rx_errors++;
				break;
			}

			/* Omit CRC. */
			len -= ETH_FCS_LEN;

			skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
			if (unlikely(!skb)) {
				netdev_err(bgmac->net_dev, "build_skb failed\n");
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_errors++;
				break;
			}
			skb_put(skb, BGMAC_RX_FRAME_OFFSET +
				BGMAC_RX_BUF_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
				 BGMAC_RX_BUF_OFFSET);

			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
			bgmac->net_dev->stats.rx_bytes += len;
			bgmac->net_dev->stats.rx_packets++;
			napi_gro_receive(&bgmac->napi, skb);
			handled++;
		} while (0);

		bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	bgmac_dma_rx_update_index(bgmac, ring);

	return handled;
}

/* Does the ring support unaligned addressing? Probe it by writing a value
 * into the low bits of the ring address register: if the core keeps those
 * bits, the descriptor ring doesn't have to be aligned.
 */
static bool bgmac_dma_unaligned(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring,
				enum bgmac_dma_ring_type ring_type)
{
	switch (ring_type) {
	case BGMAC_DMA_RING_TX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
			return true;
		break;
	case BGMAC_DMA_RING_RX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
			return true;
		break;
	}
	return false;
}

static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
				   struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_dma_desc *dma_desc = ring->cpu_base;
	struct bgmac_slot_info *slot;
	int i;

	for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
		int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN;

		slot = &ring->slots[i];
		dev_kfree_skb(slot->skb);

		if (!slot->dma_addr)
			continue;

		if (slot->skb)
			dma_unmap_single(dma_dev, slot->dma_addr,
					 len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, slot->dma_addr,
				       len, DMA_TO_DEVICE);
	}
}

static void bgmac_dma_rx_ring_free(struct bgmac *bgmac,
				   struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_slot_info *slot;
	int i;

	for (i = 0; i < BGMAC_RX_RING_SLOTS; i++) {
		slot = &ring->slots[i];
		if (!slot->dma_addr)
			continue;

		dma_unmap_single(dma_dev, slot->dma_addr,
				 BGMAC_RX_BUF_SIZE,
				 DMA_FROM_DEVICE);
		put_page(virt_to_head_page(slot->buf));
		slot->dma_addr = 0;
	}
}

static void bgmac_dma_ring_desc_free(struct bgmac *bgmac,
				     struct bgmac_dma_ring *ring,
				     int num_slots)
{
	struct device *dma_dev = bgmac->dma_dev;
	int size;

	if (!ring->cpu_base)
		return;

	/* Free ring of descriptors */
	size = num_slots * sizeof(struct bgmac_dma_desc);
	dma_free_coherent(dma_dev, size, ring->cpu_base,
			  ring->dma_base);
}

static void bgmac_dma_cleanup(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_tx_ring_free(bgmac, &bgmac->tx_ring[i]);

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_rx_ring_free(bgmac, &bgmac->rx_ring[i]);
}

static void bgmac_dma_free(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_ring_desc_free(bgmac, &bgmac->tx_ring[i],
					 BGMAC_TX_RING_SLOTS);

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_ring_desc_free(bgmac, &bgmac->rx_ring[i],
					 BGMAC_RX_RING_SLOTS);
}
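
/* Allocate the coherent descriptor rings for all TX and RX queues. For
 * rings placed at an unaligned address, index_base keeps the low 32 bits
 * of the DMA base so ring indexes can be offset accordingly.
 */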
static int bgmac_dma_alloc(struct bgmac *bgmac)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_dma_ring *ring;
	static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
					 BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
	int size; /* ring size: different for Tx and Rx */
	int err;
	int i;

	BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
	BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));

	if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
		if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) {
			dev_err(bgmac->dev, "Core does not report 64-bit DMA\n");
			return -ENOTSUPP;
		}
	}

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n",
				ring->mmio_base);
			goto err_dma_free;
		}

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_TX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* No need to alloc TX slots yet */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		ring = &bgmac->rx_ring[i];
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n",
				ring->mmio_base);
			err = -ENOMEM;
			goto err_dma_free;
		}

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_RX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;
	}

	return 0;

err_dma_free:
	bgmac_dma_free(bgmac);
	return -ENOMEM;
}
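
/* Program the ring base addresses and prime every RX slot with a mapped
 * buffer. Note the ordering: aligned rings are enabled before their base
 * registers are written, unaligned ones only afterwards.
 */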
static int bgmac_dma_init(struct bgmac *bgmac)
{
	struct bgmac_dma_ring *ring;
	int i, err;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;	/* Points to the slot that should *not* be read */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;
		for (j = 0; j < BGMAC_RX_RING_SLOTS; j++) {
			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
			if (err)
				goto error;

			bgmac_dma_rx_setup_desc(bgmac, ring, j);
		}

		bgmac_dma_rx_update_index(bgmac, ring);
	}

	return 0;

error:
	bgmac_dma_cleanup(bgmac);
	return err;
}

/**************************************************
 * Chip ops
 **************************************************/

/* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
 * there is nothing to change? Try it after stabilizing the driver.
 */
static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
				 bool force)
{
	u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	u32 new_val = (cmdcfg & mask) | set;
	u32 cmdcfg_sr;

	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
	else
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;

	bgmac_set(bgmac, BGMAC_CMDCFG, cmdcfg_sr);
	udelay(2);

	if (new_val != cmdcfg || force)
		bgmac_write(bgmac, BGMAC_CMDCFG, new_val);

	bgmac_mask(bgmac, BGMAC_CMDCFG, ~cmdcfg_sr);
	udelay(2);
}

static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
{
	u32 tmp;

	tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
	tmp = (addr[4] << 8) | addr[5];
	bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
}

static void bgmac_set_rx_mode(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	if (net_dev->flags & IFF_PROMISC)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
}

#if 0 /* We don't use these regs yet */
static void bgmac_chip_stats_update(struct bgmac *bgmac)
{
	int i;

	if (!(bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)) {
		for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
			bgmac->mib_tx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_TX_GOOD_OCTETS + (i * 4));
		for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
			bgmac->mib_rx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_RX_GOOD_OCTETS + (i * 4));
	}

	/* TODO: what else? how to handle BCM4706? Specs are needed */
}
#endif

static void bgmac_clear_mib(struct bgmac *bgmac)
{
	int i;

	if (bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)
		return;

	bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
	for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
		bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
	for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
		bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
static void bgmac_mac_speed(struct bgmac *bgmac)
{
	u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
	u32 set = 0;

	switch (bgmac->mac_speed) {
	case SPEED_10:
		set |= BGMAC_CMDCFG_ES_10;
		break;
	case SPEED_100:
		set |= BGMAC_CMDCFG_ES_100;
		break;
	case SPEED_1000:
		set |= BGMAC_CMDCFG_ES_1000;
		break;
	case SPEED_2500:
		set |= BGMAC_CMDCFG_ES_2500;
		break;
	default:
		dev_err(bgmac->dev, "Unsupported speed: %d\n",
			bgmac->mac_speed);
	}

	if (bgmac->mac_duplex == DUPLEX_HALF)
		set |= BGMAC_CMDCFG_HD;

	bgmac_cmdcfg_maskset(bgmac, mask, set, true);
}

static void bgmac_miiconfig(struct bgmac *bgmac)
{
	if (bgmac->feature_flags & BGMAC_FEAT_FORCE_SPEED_2500) {
		if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
			bgmac_idm_write(bgmac, BCMA_IOCTL,
					bgmac_idm_read(bgmac, BCMA_IOCTL) |
					0x40 | BGMAC_BCMA_IOCTL_SW_CLKEN);
		}
		bgmac->mac_speed = SPEED_2500;
		bgmac->mac_duplex = DUPLEX_FULL;
		bgmac_mac_speed(bgmac);
	} else {
		u8 imode;

		imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
			BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
		if (imode == 0 || imode == 1) {
			bgmac->mac_speed = SPEED_100;
			bgmac->mac_duplex = DUPLEX_FULL;
			bgmac_mac_speed(bgmac);
		}
	}
}

static void bgmac_chip_reset_idm_config(struct bgmac *bgmac)
{
	u32 iost;

	iost = bgmac_idm_read(bgmac, BCMA_IOST);
	if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED)
		iost &= ~BGMAC_BCMA_IOST_ATTACHED;

	/* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */
	if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) {
		u32 flags = 0;

		if (iost & BGMAC_BCMA_IOST_ATTACHED) {
			flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
			if (!bgmac->has_robosw)
				flags |= BGMAC_BCMA_IOCTL_SW_RESET;
		}
		bgmac_clk_enable(bgmac, flags);
	}

	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
		bgmac_idm_write(bgmac, BCMA_IOCTL,
				bgmac_idm_read(bgmac, BCMA_IOCTL) &
				~BGMAC_BCMA_IOCTL_SW_RESET);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
static void bgmac_chip_reset(struct bgmac *bgmac)
{
	u32 cmdcfg_sr;
	int i;

	if (bgmac_clk_enabled(bgmac)) {
		if (!bgmac->stats_grabbed) {
			/* bgmac_chip_stats_update(bgmac); */
			bgmac->stats_grabbed = true;
		}

		for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
			bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);

		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
		udelay(1);

		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
			bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);

		/* TODO: Clear software multicast filter list */
	}

	if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK))
		bgmac_chip_reset_idm_config(bgmac);

	/* Request Misc PLL for corerev > 2 */
	if (bgmac->feature_flags & BGMAC_FEAT_MISC_PLL_REQ) {
		bgmac_set(bgmac, BCMA_CLKCTLST,
			  BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
		bgmac_wait_value(bgmac, BCMA_CLKCTLST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 1000);
	}

	if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_PHY) {
		u8 et_swtype = 0;
		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n",
					buf);
			et_swtype &= 0x0f;
			et_swtype <<= 4;
			sw_type = et_swtype;
		} else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_EPHYRMII) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
		} else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_RGMII) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
		}
		bgmac_cco_ctl_maskset(bgmac, 1, ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
						  BGMAC_CHIPCTL_1_SW_TYPE_MASK),
				      sw_type);
	} else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE) {
		u32 sw_type = BGMAC_CHIPCTL_4_IF_TYPE_MII |
			      BGMAC_CHIPCTL_4_SW_TYPE_EPHY;
		u8 et_swtype = 0;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n",
					buf);
			sw_type = (et_swtype & 0x0f) << 12;
		} else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII) {
			sw_type = BGMAC_CHIPCTL_4_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_4_SW_TYPE_RGMII;
		}
		bgmac_cco_ctl_maskset(bgmac, 4, ~(BGMAC_CHIPCTL_4_IF_TYPE_MASK |
						  BGMAC_CHIPCTL_4_SW_TYPE_MASK),
				      sw_type);
	} else if (bgmac->feature_flags & BGMAC_FEAT_CC7_IF_TYPE_RGMII) {
		bgmac_cco_ctl_maskset(bgmac, 7, ~BGMAC_CHIPCTL_7_IF_TYPE_MASK,
				      BGMAC_CHIPCTL_7_IF_TYPE_RGMII);
	}

	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
	 * Specs don't say anything about using BGMAC_CMDCFG_SR, but in this
	 * routine BGMAC_CMDCFG is read _after_ putting the chip in a reset,
	 * so the bit has to be kept until taking the MAC out of the reset.
	 */
	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
	else
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;

	bgmac_cmdcfg_maskset(bgmac,
			     ~(BGMAC_CMDCFG_TE |
			       BGMAC_CMDCFG_RE |
			       BGMAC_CMDCFG_RPI |
			       BGMAC_CMDCFG_TAI |
			       BGMAC_CMDCFG_HD |
			       BGMAC_CMDCFG_ML |
			       BGMAC_CMDCFG_CFE |
			       BGMAC_CMDCFG_RL |
			       BGMAC_CMDCFG_RED |
			       BGMAC_CMDCFG_PE |
			       BGMAC_CMDCFG_TPI |
			       BGMAC_CMDCFG_PAD_EN |
			       BGMAC_CMDCFG_PF),
			     BGMAC_CMDCFG_PROM |
			     BGMAC_CMDCFG_NLC |
			     BGMAC_CMDCFG_CFE |
			     cmdcfg_sr,
			     false);
	bgmac->mac_speed = SPEED_UNKNOWN;
	bgmac->mac_duplex = DUPLEX_UNKNOWN;

	bgmac_clear_mib(bgmac);
	if (bgmac->feature_flags & BGMAC_FEAT_CMN_PHY_CTL)
		bgmac_cmn_maskset32(bgmac, BCMA_GMAC_CMN_PHY_CTL, ~0,
				    BCMA_GMAC_CMN_PC_MTE);
	else
		bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
	bgmac_miiconfig(bgmac);
	if (bgmac->mii_bus)
		bgmac->mii_bus->reset(bgmac->mii_bus);

	netdev_reset_queue(bgmac->net_dev);
}

static void bgmac_chip_intrs_on(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
}
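
/* Mask all interrupts; reading BGMAC_INT_MASK back flushes the posted
 * write, so the device has really stopped raising IRQs when this returns.
 */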
static void bgmac_chip_intrs_off(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, 0);
	bgmac_read(bgmac, BGMAC_INT_MASK);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
static void bgmac_enable(struct bgmac *bgmac)
{
	u32 cmdcfg_sr;
	u32 cmdcfg;
	u32 mode;

	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
	else
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;

	cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
			     cmdcfg_sr, true);
	udelay(2);
	cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
	bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);

	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
		BGMAC_DS_MM_SHIFT;
	if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0)
		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
	if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) && mode == 2)
		bgmac_cco_ctl_maskset(bgmac, 1, ~0,
				      BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);

	if (bgmac->feature_flags & (BGMAC_FEAT_FLW_CTRL1 |
				    BGMAC_FEAT_FLW_CTRL2)) {
		u32 fl_ctl;

		if (bgmac->feature_flags & BGMAC_FEAT_FLW_CTRL1)
			fl_ctl = 0x2300e1;
		else
			fl_ctl = 0x03cb04cb;

		bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
		bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
	}

	if (bgmac->feature_flags & BGMAC_FEAT_SET_RXQ_CLK) {
		u32 rxq_ctl;
		u16 bp_clk;
		u8 mdp;
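
		/* Derive the RXQ MDP value from the backplane clock in MHz;
		 * e.g. a 250 MHz backplane gives (250 * 128 / 1000) - 3 = 29.
		 */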
		rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
		rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
		bp_clk = bgmac_get_bus_clock(bgmac) / 1000000;
		mdp = (bp_clk * 128 / 1000) - 3;
		rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
		bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
static void bgmac_chip_init(struct bgmac *bgmac)
{
	/* Clear any erroneously pending interrupts */
	bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);

	/* 1 interrupt per received frame */
	bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);

	/* Enable 802.3x tx flow control (honor received PAUSE frames) */
	bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);

	bgmac_set_rx_mode(bgmac->net_dev);

	bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);

	if (bgmac->loopback)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);

	bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);

	bgmac_chip_intrs_on(bgmac);

	bgmac_enable(bgmac);
}

static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
{
	struct bgmac *bgmac = netdev_priv(dev_id);

	u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
	int_status &= bgmac->int_mask;

	if (!int_status)
		return IRQ_NONE;

	int_status &= ~(BGMAC_IS_TX0 | BGMAC_IS_RX);
	if (int_status)
		dev_err(bgmac->dev, "Unknown IRQs: 0x%08X\n", int_status);

	/* Disable new interrupts until handling existing ones */
	bgmac_chip_intrs_off(bgmac);

	napi_schedule(&bgmac->napi);

	return IRQ_HANDLED;
}
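
/* NAPI poll: reap completed TX slots and receive up to @weight frames.
 * Interrupts are re-enabled only when the budget wasn't exhausted and no
 * new events arrived in the meantime.
 */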
static int bgmac_poll(struct napi_struct *napi, int weight)
{
	struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
	int handled = 0;

	/* Ack */
	bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);

	bgmac_dma_tx_free(bgmac, &bgmac->tx_ring[0]);
	handled += bgmac_dma_rx_read(bgmac, &bgmac->rx_ring[0], weight);

	/* Poll again if more events arrived in the meantime */
	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX))
		return weight;

	if (handled < weight) {
		napi_complete_done(napi, handled);
		bgmac_chip_intrs_on(bgmac);
	}

	return handled;
}

/**************************************************
 * net_device_ops
 **************************************************/

static int bgmac_open(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int err = 0;

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_init(bgmac);
	if (err)
		return err;

	/* The specs talk about reclaiming rings here, but we do that in DMA init */
	bgmac_chip_init(bgmac);

	err = request_irq(bgmac->irq, bgmac_interrupt, IRQF_SHARED,
			  KBUILD_MODNAME, net_dev);
	if (err < 0) {
		dev_err(bgmac->dev, "IRQ request error: %d!\n", err);
		bgmac_dma_cleanup(bgmac);
		return err;
	}
	napi_enable(&bgmac->napi);

	phy_start(net_dev->phydev);

	netif_start_queue(net_dev);

	return 0;
}

static int bgmac_stop(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	netif_carrier_off(net_dev);

	phy_stop(net_dev->phydev);

	napi_disable(&bgmac->napi);
	bgmac_chip_intrs_off(bgmac);
	free_irq(bgmac->irq, net_dev);

	bgmac_chip_reset(bgmac);
	bgmac_dma_cleanup(bgmac);

	return 0;
}

static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
				    struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct bgmac_dma_ring *ring;

	/* No QoS support yet */
	ring = &bgmac->tx_ring[0];
	return bgmac_dma_tx_add(bgmac, ring, skb);
}

static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct sockaddr *sa = addr;
	int ret;

	ret = eth_prepare_mac_addr_change(net_dev, addr);
	if (ret < 0)
		return ret;

	ether_addr_copy(net_dev->dev_addr, sa->sa_data);
	bgmac_write_mac_address(bgmac, net_dev->dev_addr);

	eth_commit_mac_addr_change(net_dev, addr);
	return 0;
}

static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	if (!netif_running(net_dev))
		return -EINVAL;

	return phy_mii_ioctl(net_dev->phydev, ifr, cmd);
}

static const struct net_device_ops bgmac_netdev_ops = {
	.ndo_open		= bgmac_open,
	.ndo_stop		= bgmac_stop,
	.ndo_start_xmit		= bgmac_start_xmit,
	.ndo_set_rx_mode	= bgmac_set_rx_mode,
	.ndo_set_mac_address	= bgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bgmac_ioctl,
};
1286
1287/**************************************************
1288 * ethtool_ops
1289 **************************************************/
1290
Florian Fainellif6613d42016-06-07 15:06:14 -07001291struct bgmac_stat {
1292 u8 size;
1293 u32 offset;
1294 const char *name;
1295};
static struct bgmac_stat bgmac_get_strings_stats[] = {
	{ 8, BGMAC_TX_GOOD_OCTETS, "tx_good_octets" },
	{ 4, BGMAC_TX_GOOD_PKTS, "tx_good" },
	{ 8, BGMAC_TX_OCTETS, "tx_octets" },
	{ 4, BGMAC_TX_PKTS, "tx_pkts" },
	{ 4, BGMAC_TX_BROADCAST_PKTS, "tx_broadcast" },
	{ 4, BGMAC_TX_MULTICAST_PKTS, "tx_multicast" },
	{ 4, BGMAC_TX_LEN_64, "tx_64" },
	{ 4, BGMAC_TX_LEN_65_TO_127, "tx_65_127" },
	{ 4, BGMAC_TX_LEN_128_TO_255, "tx_128_255" },
	{ 4, BGMAC_TX_LEN_256_TO_511, "tx_256_511" },
	{ 4, BGMAC_TX_LEN_512_TO_1023, "tx_512_1023" },
	{ 4, BGMAC_TX_LEN_1024_TO_1522, "tx_1024_1522" },
	{ 4, BGMAC_TX_LEN_1523_TO_2047, "tx_1523_2047" },
	{ 4, BGMAC_TX_LEN_2048_TO_4095, "tx_2048_4095" },
	{ 4, BGMAC_TX_LEN_4096_TO_8191, "tx_4096_8191" },
	{ 4, BGMAC_TX_LEN_8192_TO_MAX, "tx_8192_max" },
	{ 4, BGMAC_TX_JABBER_PKTS, "tx_jabber" },
	{ 4, BGMAC_TX_OVERSIZE_PKTS, "tx_oversize" },
	{ 4, BGMAC_TX_FRAGMENT_PKTS, "tx_fragment" },
	{ 4, BGMAC_TX_UNDERRUNS, "tx_underruns" },
	{ 4, BGMAC_TX_TOTAL_COLS, "tx_total_cols" },
	{ 4, BGMAC_TX_SINGLE_COLS, "tx_single_cols" },
	{ 4, BGMAC_TX_MULTIPLE_COLS, "tx_multiple_cols" },
	{ 4, BGMAC_TX_EXCESSIVE_COLS, "tx_excessive_cols" },
	{ 4, BGMAC_TX_LATE_COLS, "tx_late_cols" },
	{ 4, BGMAC_TX_DEFERED, "tx_defered" },
	{ 4, BGMAC_TX_CARRIER_LOST, "tx_carrier_lost" },
	{ 4, BGMAC_TX_PAUSE_PKTS, "tx_pause" },
	{ 4, BGMAC_TX_UNI_PKTS, "tx_unicast" },
	{ 4, BGMAC_TX_Q0_PKTS, "tx_q0" },
	{ 8, BGMAC_TX_Q0_OCTETS, "tx_q0_octets" },
	{ 4, BGMAC_TX_Q1_PKTS, "tx_q1" },
	{ 8, BGMAC_TX_Q1_OCTETS, "tx_q1_octets" },
	{ 4, BGMAC_TX_Q2_PKTS, "tx_q2" },
	{ 8, BGMAC_TX_Q2_OCTETS, "tx_q2_octets" },
	{ 4, BGMAC_TX_Q3_PKTS, "tx_q3" },
	{ 8, BGMAC_TX_Q3_OCTETS, "tx_q3_octets" },
	{ 8, BGMAC_RX_GOOD_OCTETS, "rx_good_octets" },
	{ 4, BGMAC_RX_GOOD_PKTS, "rx_good" },
	{ 8, BGMAC_RX_OCTETS, "rx_octets" },
	{ 4, BGMAC_RX_PKTS, "rx_pkts" },
	{ 4, BGMAC_RX_BROADCAST_PKTS, "rx_broadcast" },
	{ 4, BGMAC_RX_MULTICAST_PKTS, "rx_multicast" },
	{ 4, BGMAC_RX_LEN_64, "rx_64" },
	{ 4, BGMAC_RX_LEN_65_TO_127, "rx_65_127" },
	{ 4, BGMAC_RX_LEN_128_TO_255, "rx_128_255" },
	{ 4, BGMAC_RX_LEN_256_TO_511, "rx_256_511" },
	{ 4, BGMAC_RX_LEN_512_TO_1023, "rx_512_1023" },
	{ 4, BGMAC_RX_LEN_1024_TO_1522, "rx_1024_1522" },
	{ 4, BGMAC_RX_LEN_1523_TO_2047, "rx_1523_2047" },
	{ 4, BGMAC_RX_LEN_2048_TO_4095, "rx_2048_4095" },
	{ 4, BGMAC_RX_LEN_4096_TO_8191, "rx_4096_8191" },
	{ 4, BGMAC_RX_LEN_8192_TO_MAX, "rx_8192_max" },
	{ 4, BGMAC_RX_JABBER_PKTS, "rx_jabber" },
	{ 4, BGMAC_RX_OVERSIZE_PKTS, "rx_oversize" },
	{ 4, BGMAC_RX_FRAGMENT_PKTS, "rx_fragment" },
	{ 4, BGMAC_RX_MISSED_PKTS, "rx_missed" },
	{ 4, BGMAC_RX_CRC_ALIGN_ERRS, "rx_crc_align" },
	{ 4, BGMAC_RX_UNDERSIZE, "rx_undersize" },
	{ 4, BGMAC_RX_CRC_ERRS, "rx_crc" },
	{ 4, BGMAC_RX_ALIGN_ERRS, "rx_align" },
	{ 4, BGMAC_RX_SYMBOL_ERRS, "rx_symbol" },
	{ 4, BGMAC_RX_PAUSE_PKTS, "rx_pause" },
	{ 4, BGMAC_RX_NONPAUSE_PKTS, "rx_nonpause" },
	{ 4, BGMAC_RX_SACHANGES, "rx_sa_changes" },
	{ 4, BGMAC_RX_UNI_PKTS, "rx_unicast" },
};

#define BGMAC_STATS_LEN	ARRAY_SIZE(bgmac_get_strings_stats)

static int bgmac_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BGMAC_STATS_LEN;
	}

	return -EOPNOTSUPP;
}

static void bgmac_get_strings(struct net_device *dev, u32 stringset,
			      u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < BGMAC_STATS_LEN; i++)
		strlcpy(data + i * ETH_GSTRING_LEN,
			bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
}
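
/* Snapshot the hardware counters.  A 64-bit counter is assembled from two
 * 32-bit reads: the high word at offset + 4 first, then the low word.
 */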
static void bgmac_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *ss, uint64_t *data)
{
	struct bgmac *bgmac = netdev_priv(dev);
	const struct bgmac_stat *s;
	unsigned int i;
	u64 val;

	if (!netif_running(dev))
		return;

	for (i = 0; i < BGMAC_STATS_LEN; i++) {
		s = &bgmac_get_strings_stats[i];
		val = 0;
		if (s->size == 8)
			val = (u64)bgmac_read(bgmac, s->offset + 4) << 32;
		val |= bgmac_read(bgmac, s->offset);
		data[i] = val;
	}
}

static void bgmac_get_drvinfo(struct net_device *net_dev,
			      struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, "AXI", sizeof(info->bus_info));
}

static const struct ethtool_ops bgmac_ethtool_ops = {
	.get_strings		= bgmac_get_strings,
	.get_sset_count		= bgmac_get_sset_count,
	.get_ethtool_stats	= bgmac_get_ethtool_stats,
	.get_drvinfo		= bgmac_get_drvinfo,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

/**************************************************
 * MII
 **************************************************/
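
/* PHY library link-change callback: propagate the negotiated speed and
 * duplex into the MAC, logging the link state only when something changed.
 */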
void bgmac_adjust_link(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct phy_device *phy_dev = net_dev->phydev;
	bool update = false;

	if (phy_dev->link) {
		if (phy_dev->speed != bgmac->mac_speed) {
			bgmac->mac_speed = phy_dev->speed;
			update = true;
		}

		if (phy_dev->duplex != bgmac->mac_duplex) {
			bgmac->mac_duplex = phy_dev->duplex;
			update = true;
		}
	}

	if (update) {
		bgmac_mac_speed(bgmac);
		phy_print_status(phy_dev);
	}
}
EXPORT_SYMBOL_GPL(bgmac_adjust_link);
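
/* Fallback for setups without a manageable (MDIO) PHY: register a fixed
 * 1000/full PHY and connect to it so the phylib state machine still runs.
 */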
int bgmac_phy_connect_direct(struct bgmac *bgmac)
{
	struct fixed_phy_status fphy_status = {
		.link = 1,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};
	struct phy_device *phy_dev;
	int err;

	phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
	if (!phy_dev || IS_ERR(phy_dev)) {
		dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
		return -ENODEV;
	}

	err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link,
				 PHY_INTERFACE_MODE_MII);
	if (err) {
		dev_err(bgmac->dev, "Connecting PHY failed\n");
		/* Don't leak the fixed PHY registered above */
		fixed_phy_unregister(phy_dev);
		return err;
	}

	return err;
}
EXPORT_SYMBOL_GPL(bgmac_phy_connect_direct);

struct bgmac *bgmac_alloc(struct device *dev)
{
	struct net_device *net_dev;
	struct bgmac *bgmac;

	/* Allocation and references */
	net_dev = devm_alloc_etherdev(dev, sizeof(*bgmac));
	if (!net_dev)
		return NULL;

	net_dev->netdev_ops = &bgmac_netdev_ops;
	net_dev->ethtool_ops = &bgmac_ethtool_ops;

	bgmac = netdev_priv(net_dev);
	bgmac->dev = dev;
	bgmac->net_dev = net_dev;

	return bgmac;
}
EXPORT_SYMBOL_GPL(bgmac_alloc);
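
/* Common probe path shared by the bus-specific front ends: bring the core
 * up, allocate the DMA rings, attach the PHY and register the net_device.
 */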
int bgmac_enet_probe(struct bgmac *bgmac)
{
	struct net_device *net_dev = bgmac->net_dev;
	int err;

	net_dev->irq = bgmac->irq;
	SET_NETDEV_DEV(net_dev, bgmac->dev);
	dev_set_drvdata(bgmac->dev, bgmac);

	if (!is_valid_ether_addr(net_dev->dev_addr)) {
		dev_err(bgmac->dev, "Invalid MAC addr: %pM\n",
			net_dev->dev_addr);
		eth_hw_addr_random(net_dev);
		dev_warn(bgmac->dev, "Using random MAC: %pM\n",
			 net_dev->dev_addr);
	}

	/* This (reset &) enable is not present in the specs or the reference
	 * driver, but Broadcom does it in arch PCI code when enabling the
	 * fake PCI device.
	 */
	bgmac_clk_enable(bgmac, 0);

	/* This seems to be fixing IRQ by assigning OOB #6 to the core */
	if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
		if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6)
			bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86);
	}

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_alloc(bgmac);
	if (err) {
		dev_err(bgmac->dev, "Unable to alloc memory for DMA\n");
		goto err_out;
	}

	bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
	if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
		bgmac->int_mask &= ~BGMAC_IS_TX_MASK;

	netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);

	err = bgmac_phy_connect(bgmac);
	if (err) {
		dev_err(bgmac->dev, "Cannot connect to phy\n");
		goto err_dma_free;
	}

	net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	net_dev->hw_features = net_dev->features;
	net_dev->vlan_features = net_dev->features;

	err = register_netdev(bgmac->net_dev);
	if (err) {
		dev_err(bgmac->dev, "Cannot register net device\n");
		goto err_phy_disconnect;
	}

	netif_carrier_off(net_dev);

	return 0;

err_phy_disconnect:
	phy_disconnect(net_dev->phydev);
err_dma_free:
	bgmac_dma_free(bgmac);
err_out:

	return err;
}
EXPORT_SYMBOL_GPL(bgmac_enet_probe);
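
/* Undo bgmac_enet_probe(): unregister the netdev first so no new I/O can
 * start, then detach the PHY and release the DMA resources.
 */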
void bgmac_enet_remove(struct bgmac *bgmac)
{
	unregister_netdev(bgmac->net_dev);
	phy_disconnect(bgmac->net_dev->phydev);
	netif_napi_del(&bgmac->napi);
	bgmac_dma_free(bgmac);
	/* No free_netdev() here: the net_device came from
	 * devm_alloc_etherdev() in bgmac_alloc(), so devres frees it when the
	 * driver detaches; freeing it again here would be a double free.
	 */
}
EXPORT_SYMBOL_GPL(bgmac_enet_remove);
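
/* Power-management helpers, called from the bus front ends' suspend/resume
 * hooks: quiesce the MAC completely on suspend and rebuild the DMA state on
 * resume.
 */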
int bgmac_enet_suspend(struct bgmac *bgmac)
{
	if (!netif_running(bgmac->net_dev))
		return 0;

	phy_stop(bgmac->net_dev->phydev);

	netif_stop_queue(bgmac->net_dev);

	napi_disable(&bgmac->napi);

	netif_tx_lock(bgmac->net_dev);
	netif_device_detach(bgmac->net_dev);
	netif_tx_unlock(bgmac->net_dev);

	bgmac_chip_intrs_off(bgmac);
	bgmac_chip_reset(bgmac);
	bgmac_dma_cleanup(bgmac);

	return 0;
}
EXPORT_SYMBOL_GPL(bgmac_enet_suspend);

int bgmac_enet_resume(struct bgmac *bgmac)
{
	int rc;

	if (!netif_running(bgmac->net_dev))
		return 0;

	rc = bgmac_dma_init(bgmac);
	if (rc)
		return rc;

	bgmac_chip_init(bgmac);

	napi_enable(&bgmac->napi);

	netif_tx_lock(bgmac->net_dev);
	netif_device_attach(bgmac->net_dev);
	netif_tx_unlock(bgmac->net_dev);

	netif_start_queue(bgmac->net_dev);

	phy_start(bgmac->net_dev->phydev);

	return 0;
}
EXPORT_SYMBOL_GPL(bgmac_enet_resume);

MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");