/*********************************************************************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2007 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 **********************************************************************/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/phy.h>
#include <linux/ratelimit.h>
#include <net/dst.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-helper.h>

#include <asm/octeon/cvmx-ipd-defs.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-gmxx-defs.h>

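/*
 * Protects registers that are shared between ports; taken by
 * cvm_oct_rgmii_poll() when the port has no attached PHY device.
 */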
static DEFINE_SPINLOCK(global_register_lock);

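/*
 * Count of initialized RGMII ports; also serves as the reference count
 * for the shared OCTEON_IRQ_RML interrupt handler.
 */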
static int number_rgmii_ports;

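/*
 * Re-read the inband link state for a port, update the carrier state
 * and apply the 10Mbps preamble workaround as needed.  Must be called
 * from process context (never from interrupt context).
 */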
static void cvm_oct_rgmii_poll(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	unsigned long flags = 0;
	cvmx_helper_link_info_t link_info;
	int use_global_register_lock = (priv->phydev == NULL);

	BUG_ON(in_interrupt());
	if (use_global_register_lock) {
		/*
		 * Take the global register lock since we are going to
		 * touch registers that affect more than one port.
		 */
		spin_lock_irqsave(&global_register_lock, flags);
	} else {
		mutex_lock(&priv->phydev->bus->mdio_lock);
	}

	link_info = cvmx_helper_link_get(priv->port);
	if (link_info.u64 == priv->link_info) {

		/*
		 * If the 10Mbps preamble workaround is supported and we're
		 * at 10Mbps we may need to do some special checking.
		 */
		if (USE_10MBPS_PREAMBLE_WORKAROUND &&
		    (link_info.s.speed == 10)) {

			/*
			 * Read the GMXX_RXX_INT_REG[PCTERR] bit and
			 * see if we are getting preamble errors.
			 */
			int interface = INTERFACE(priv->port);
			int index = INDEX(priv->port);
			union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg;

			gmxx_rxx_int_reg.u64 =
			    cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
					  (index, interface));
			if (gmxx_rxx_int_reg.s.pcterr) {

				/*
				 * We are getting preamble errors at
				 * 10Mbps.  Most likely the PHY is
				 * giving us packets with misaligned
				 * preambles.  In order to get these
				 * packets we need to disable preamble
				 * checking and do it in software.
				 */
				union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
				union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs;

				/* Disable preamble checking */
				gmxx_rxx_frm_ctl.u64 =
				    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL
						  (index, interface));
				gmxx_rxx_frm_ctl.s.pre_chk = 0;
				cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL
					       (index, interface),
					       gmxx_rxx_frm_ctl.u64);

				/* Disable FCS stripping */
				ipd_sub_port_fcs.u64 =
				    cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
				ipd_sub_port_fcs.s.port_bit &=
				    0xffffffffull ^ (1ull << priv->port);
				cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS,
					       ipd_sub_port_fcs.u64);

				/* Clear any error bits */
				cvmx_write_csr(CVMX_GMXX_RXX_INT_REG
					       (index, interface),
					       gmxx_rxx_int_reg.u64);
				printk_ratelimited("%s: Using 10Mbps with software preamble removal\n",
						   dev->name);
			}
		}

		if (use_global_register_lock)
			spin_unlock_irqrestore(&global_register_lock, flags);
		else
			mutex_unlock(&priv->phydev->bus->mdio_lock);
		return;
	}

	/*
	 * If the 10Mbps preamble workaround is allowed we need to turn
	 * preamble checking and FCS stripping back on and clear the
	 * error bits on every speed change.  If errors occur during
	 * 10Mbps operation the code above will disable them again.
	 */
	if (USE_10MBPS_PREAMBLE_WORKAROUND) {

		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
		union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs;
		union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		/* Enable preamble checking */
		gmxx_rxx_frm_ctl.u64 =
		    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
		gmxx_rxx_frm_ctl.s.pre_chk = 1;
		cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface),
			       gmxx_rxx_frm_ctl.u64);

		/* Enable FCS stripping */
		ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
		ipd_sub_port_fcs.s.port_bit |= 1ull << priv->port;
		cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);

		/* Clear any error bits */
		gmxx_rxx_int_reg.u64 =
		    cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, interface));
		cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface),
			       gmxx_rxx_int_reg.u64);
	}
	if (priv->phydev == NULL) {
		link_info = cvmx_helper_link_autoconf(priv->port);
		priv->link_info = link_info.u64;
	}

	if (use_global_register_lock)
		spin_unlock_irqrestore(&global_register_lock, flags);
	else
		mutex_unlock(&priv->phydev->bus->mdio_lock);

	if (priv->phydev == NULL) {
		/* Tell core. */
		if (link_info.s.link_up) {
			if (!netif_carrier_ok(dev))
				netif_carrier_on(dev);
			if (priv->queue != -1)
				printk_ratelimited("%s: %u Mbps %s duplex, port %2d, queue %2d\n",
						   dev->name, link_info.s.speed,
						   (link_info.s.full_duplex) ?
						   "Full" : "Half",
						   priv->port, priv->queue);
			else
				printk_ratelimited("%s: %u Mbps %s duplex, port %2d, POW\n",
						   dev->name, link_info.s.speed,
						   (link_info.s.full_duplex) ?
						   "Full" : "Half",
						   priv->port);
		} else {
			if (netif_carrier_ok(dev))
				netif_carrier_off(dev);
			printk_ratelimited("%s: Link down\n", dev->name);
		}
	}
}

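/*
 * Shared RML interrupt handler.  Scans both GMX blocks for ports whose
 * inband link status changed, queues the port's poll work, and clears
 * the corresponding interrupt status bits.
 */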
static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
{
	union cvmx_npi_rsl_int_blocks rsl_int_blocks;
	int index;
	irqreturn_t return_status = IRQ_NONE;

	rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS);

	/* Check and see if this interrupt was caused by the GMX0 block */
	if (rsl_int_blocks.s.gmx0) {

		int interface = 0;
		/* Loop through every port of this interface */
		for (index = 0;
		     index < cvmx_helper_ports_on_interface(interface);
		     index++) {

			/* Read the GMX interrupt status bits */
			union cvmx_gmxx_rxx_int_reg gmx_rx_int_reg;

			gmx_rx_int_reg.u64 =
			    cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
					  (index, interface));
			gmx_rx_int_reg.u64 &=
			    cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
					  (index, interface));
			/* Poll the port if inband status changed */
			if (gmx_rx_int_reg.s.phy_dupx
			    || gmx_rx_int_reg.s.phy_link
			    || gmx_rx_int_reg.s.phy_spd) {

				struct net_device *dev =
				    cvm_oct_device[cvmx_helper_get_ipd_port
						   (interface, index)];
				struct octeon_ethernet *priv = netdev_priv(dev);

				if (dev &&
				    !atomic_read(&cvm_oct_poll_queue_stopping))
					queue_work(cvm_oct_poll_queue,
						   &priv->port_work);

				/* Acknowledge the inband status interrupts */
				gmx_rx_int_reg.u64 = 0;
				gmx_rx_int_reg.s.phy_dupx = 1;
				gmx_rx_int_reg.s.phy_link = 1;
				gmx_rx_int_reg.s.phy_spd = 1;
				cvmx_write_csr(CVMX_GMXX_RXX_INT_REG
					       (index, interface),
					       gmx_rx_int_reg.u64);
				return_status = IRQ_HANDLED;
			}
		}
	}

	/* Check and see if this interrupt was caused by the GMX1 block */
	if (rsl_int_blocks.s.gmx1) {

		int interface = 1;
		/* Loop through every port of this interface */
		for (index = 0;
		     index < cvmx_helper_ports_on_interface(interface);
		     index++) {

			/* Read the GMX interrupt status bits */
			union cvmx_gmxx_rxx_int_reg gmx_rx_int_reg;

			gmx_rx_int_reg.u64 =
			    cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
					  (index, interface));
			gmx_rx_int_reg.u64 &=
			    cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
					  (index, interface));
			/* Poll the port if inband status changed */
			if (gmx_rx_int_reg.s.phy_dupx
			    || gmx_rx_int_reg.s.phy_link
			    || gmx_rx_int_reg.s.phy_spd) {

				struct net_device *dev =
				    cvm_oct_device[cvmx_helper_get_ipd_port
						   (interface, index)];
				struct octeon_ethernet *priv = netdev_priv(dev);

				if (dev &&
				    !atomic_read(&cvm_oct_poll_queue_stopping))
					queue_work(cvm_oct_poll_queue,
						   &priv->port_work);

				/* Acknowledge the inband status interrupts */
				gmx_rx_int_reg.u64 = 0;
				gmx_rx_int_reg.s.phy_dupx = 1;
				gmx_rx_int_reg.s.phy_link = 1;
				gmx_rx_int_reg.s.phy_spd = 1;
				cvmx_write_csr(CVMX_GMXX_RXX_INT_REG
					       (index, interface),
					       gmx_rx_int_reg.u64);
				return_status = IRQ_HANDLED;
			}
		}
	}
	return return_status;
}

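/* Enable the GMX hardware port and set the initial carrier state. */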
int cvm_oct_rgmii_open(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
	cvmx_helper_link_info_t link_info;

	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	gmx_cfg.s.en = 1;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

	if (!octeon_is_simulation()) {
		link_info = cvmx_helper_link_get(priv->port);
		if (!link_info.s.link_up)
			netif_carrier_off(dev);
	}

	return 0;
}

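/* Disable the GMX hardware port. */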
int cvm_oct_rgmii_stop(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	gmx_cfg.s.en = 0;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
	return 0;
}

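/* Work queue callback: poll the port whose work item was queued. */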
static void cvm_oct_rgmii_immediate_poll(struct work_struct *work)
{
	struct octeon_ethernet *priv =
		container_of(work, struct octeon_ethernet, port_work);

	cvm_oct_rgmii_poll(cvm_oct_device[priv->port]);
}

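/*
 * One-time port setup: register the shared RML interrupt on first use
 * and, where the port uses RGMII inband status, enable status-change
 * interrupts so the port is polled whenever the PHY state changes.
 */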
int cvm_oct_rgmii_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int r;

	cvm_oct_common_init(dev);
	dev->netdev_ops->ndo_stop(dev);
	INIT_WORK(&priv->port_work, cvm_oct_rgmii_immediate_poll);
	/*
	 * Due to GMX errata in CN3XXX series chips, it is necessary
	 * to take the link down immediately when the PHY changes
	 * state.  In order to do this we call the poll function every
	 * time the RGMII inband status changes.  This may cause
	 * problems if the PHY doesn't implement inband status
	 * properly.
	 */
	if (number_rgmii_ports == 0) {
		r = request_irq(OCTEON_IRQ_RML, cvm_oct_rgmii_rml_interrupt,
				IRQF_SHARED, "RGMII", &number_rgmii_ports);
		if (r != 0)
			return r;
	}
	number_rgmii_ports++;

	/*
	 * Only true RGMII ports need to be polled. In GMII mode, port
	 * 0 is really a RGMII port.
	 */
	if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII)
	     && (priv->port == 0))
	    || (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {

		if (!octeon_is_simulation()) {

			union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
			int interface = INTERFACE(priv->port);
			int index = INDEX(priv->port);

			/*
			 * Enable interrupts on inband status changes
			 * for this port.
			 */
			gmx_rx_int_en.u64 = 0;
			gmx_rx_int_en.s.phy_dupx = 1;
			gmx_rx_int_en.s.phy_link = 1;
			gmx_rx_int_en.s.phy_spd = 1;
			cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface),
				       gmx_rx_int_en.u64);
			priv->poll = cvm_oct_rgmii_poll;
		}
	}

	return 0;
}

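/*
 * Port teardown: disable inband status interrupts, drop the shared RML
 * interrupt when the last port goes away, and cancel pending poll work.
 */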
void cvm_oct_rgmii_uninit(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);

	cvm_oct_common_uninit(dev);

	/*
	 * Only true RGMII ports need to be polled. In GMII mode, port
	 * 0 is really a RGMII port.
	 */
	if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII)
	     && (priv->port == 0))
	    || (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {

		if (!octeon_is_simulation()) {

			union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
			int interface = INTERFACE(priv->port);
			int index = INDEX(priv->port);

			/*
			 * Disable interrupts on inband status changes
			 * for this port.
			 */
			gmx_rx_int_en.u64 =
			    cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
					  (index, interface));
			gmx_rx_int_en.s.phy_dupx = 0;
			gmx_rx_int_en.s.phy_link = 0;
			gmx_rx_int_en.s.phy_spd = 0;
			cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface),
				       gmx_rx_int_en.u64);
		}
	}

	/* Remove the interrupt handler when the last port is removed. */
	number_rgmii_ports--;
	if (number_rgmii_ports == 0)
		free_irq(OCTEON_IRQ_RML, &number_rgmii_ports);

	cancel_work_sync(&priv->port_work);
}