blob: a2d778aefadfe22af51ade4c888275a9980aba56 [file] [log] [blame]
Lendacky, Thomas4d874b32014-06-05 09:15:12 -05001/*
2 * AMD 10Gb Ethernet PHY driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 *
25 * License 2: Modified BSD
26 *
27 * Copyright (c) 2014 Advanced Micro Devices, Inc.
28 * All rights reserved.
29 *
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions are met:
32 * * Redistributions of source code must retain the above copyright
33 * notice, this list of conditions and the following disclaimer.
34 * * Redistributions in binary form must reproduce the above copyright
35 * notice, this list of conditions and the following disclaimer in the
36 * documentation and/or other materials provided with the distribution.
37 * * Neither the name of Advanced Micro Devices, Inc. nor the
38 * names of its contributors may be used to endorse or promote products
39 * derived from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
42 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
45 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
46 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
47 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
48 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
49 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
50 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 */
52
53#include <linux/kernel.h>
54#include <linux/device.h>
55#include <linux/platform_device.h>
56#include <linux/string.h>
57#include <linux/errno.h>
58#include <linux/unistd.h>
59#include <linux/slab.h>
60#include <linux/interrupt.h>
61#include <linux/init.h>
62#include <linux/delay.h>
63#include <linux/netdevice.h>
64#include <linux/etherdevice.h>
65#include <linux/skbuff.h>
66#include <linux/mm.h>
67#include <linux/module.h>
68#include <linux/mii.h>
69#include <linux/ethtool.h>
70#include <linux/phy.h>
71#include <linux/mdio.h>
72#include <linux/io.h>
73#include <linux/of.h>
74#include <linux/of_platform.h>
75#include <linux/of_device.h>
76#include <linux/uaccess.h>
Lendacky, Thomas4d874b32014-06-05 09:15:12 -050077
78
79MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
80MODULE_LICENSE("Dual BSD/GPL");
81MODULE_VERSION("1.0.0-a");
82MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
83
84#define XGBE_PHY_ID 0x000162d0
85#define XGBE_PHY_MASK 0xfffffff0
86
Lendacky, Thomasf0476042014-07-29 08:57:25 -050087#define XGBE_PHY_SPEEDSET_PROPERTY "amd,speed-set"
88
Lendacky, Thomas4d874b32014-06-05 09:15:12 -050089#define XGBE_AN_INT_CMPLT 0x01
90#define XGBE_AN_INC_LINK 0x02
91#define XGBE_AN_PG_RCV 0x04
92
93#define XNP_MCF_NULL_MESSAGE 0x001
94#define XNP_ACK_PROCESSED (1 << 12)
95#define XNP_MP_FORMATTED (1 << 13)
96#define XNP_NP_EXCHANGE (1 << 15)
97
Lendacky, Thomas169a6302014-07-29 08:57:37 -050098#define XGBE_PHY_RATECHANGE_COUNT 100
99
Lendacky, Thomas4d874b32014-06-05 09:15:12 -0500100#ifndef MDIO_PMA_10GBR_PMD_CTRL
101#define MDIO_PMA_10GBR_PMD_CTRL 0x0096
102#endif
103#ifndef MDIO_PMA_10GBR_FEC_CTRL
104#define MDIO_PMA_10GBR_FEC_CTRL 0x00ab
105#endif
106#ifndef MDIO_AN_XNP
107#define MDIO_AN_XNP 0x0016
108#endif
109
110#ifndef MDIO_AN_INTMASK
111#define MDIO_AN_INTMASK 0x8001
112#endif
113#ifndef MDIO_AN_INT
114#define MDIO_AN_INT 0x8002
115#endif
116
117#ifndef MDIO_CTRL1_SPEED1G
118#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
119#endif
120
121/* SerDes integration register offsets */
122#define SIR0_STATUS 0x0040
123#define SIR1_SPEED 0x0000
124
125/* SerDes integration register entry bit positions and sizes */
126#define SIR0_STATUS_RX_READY_INDEX 0
127#define SIR0_STATUS_RX_READY_WIDTH 1
128#define SIR0_STATUS_TX_READY_INDEX 8
129#define SIR0_STATUS_TX_READY_WIDTH 1
130#define SIR1_SPEED_DATARATE_INDEX 4
131#define SIR1_SPEED_DATARATE_WIDTH 2
132#define SIR1_SPEED_PI_SPD_SEL_INDEX 12
133#define SIR1_SPEED_PI_SPD_SEL_WIDTH 4
134#define SIR1_SPEED_PLLSEL_INDEX 3
135#define SIR1_SPEED_PLLSEL_WIDTH 1
136#define SIR1_SPEED_RATECHANGE_INDEX 6
137#define SIR1_SPEED_RATECHANGE_WIDTH 1
138#define SIR1_SPEED_TXAMP_INDEX 8
139#define SIR1_SPEED_TXAMP_WIDTH 4
140#define SIR1_SPEED_WORDMODE_INDEX 0
141#define SIR1_SPEED_WORDMODE_WIDTH 3
142
143#define SPEED_10000_CDR 0x7
144#define SPEED_10000_PLL 0x1
145#define SPEED_10000_RATE 0x0
146#define SPEED_10000_TXAMP 0xa
147#define SPEED_10000_WORD 0x7
148
149#define SPEED_2500_CDR 0x2
150#define SPEED_2500_PLL 0x0
Lendacky, Thomasf0476042014-07-29 08:57:25 -0500151#define SPEED_2500_RATE 0x1
Lendacky, Thomas4d874b32014-06-05 09:15:12 -0500152#define SPEED_2500_TXAMP 0xf
153#define SPEED_2500_WORD 0x1
154
155#define SPEED_1000_CDR 0x2
156#define SPEED_1000_PLL 0x0
157#define SPEED_1000_RATE 0x3
158#define SPEED_1000_TXAMP 0xf
159#define SPEED_1000_WORD 0x1
160
161
/* SerDes RxTx register offsets */
#define RXTX_REG20	0x0050
#define RXTX_REG114	0x01c8

/* SerDes RxTx register entry bit positions and sizes */
#define RXTX_REG20_BLWC_ENA_INDEX	2
#define RXTX_REG20_BLWC_ENA_WIDTH	1
#define RXTX_REG114_PQ_REG_INDEX	9
#define RXTX_REG114_PQ_REG_WIDTH	7

/* Per-speed RxTx configuration values */
#define RXTX_10000_BLWC	0
#define RXTX_10000_PQ	0x1e

#define RXTX_2500_BLWC	1
#define RXTX_2500_PQ	0xa

#define RXTX_1000_BLWC	1
#define RXTX_1000_PQ	0xa
180
/* Bit setting and getting macros
 * The get macro extracts the current bit field value from within
 * the variable
 *
 * The set macro clears the current bit field value within the
 * variable and then sets the bit field to the specified value
 */
#define GET_BITS(_var, _index, _width)					\
	(((_var) >> (_index)) & ((0x1 << (_width)) - 1))

#define SET_BITS(_var, _index, _width, _val)				\
do {									\
	(_var) &= ~(((0x1 << (_width)) - 1) << (_index));		\
	(_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index));	\
} while (0)

/* Field accessors built from <prefix>_<field>_INDEX/_WIDTH macro names */
#define XSIR_GET_BITS(_var, _prefix, _field)				\
	GET_BITS((_var),						\
		 _prefix##_##_field##_INDEX,				\
		 _prefix##_##_field##_WIDTH)

#define XSIR_SET_BITS(_var, _prefix, _field, _val)			\
	SET_BITS((_var),						\
		 _prefix##_##_field##_INDEX,				\
		 _prefix##_##_field##_WIDTH, (_val))
207
/* Macros for reading or writing SerDes integration registers
 * The ioread macros get bit fields or full values using the
 * register definitions formed from the input names
 *
 * The iowrite macros set bit fields or full values using the
 * register definitions formed from the input names
 */
#define XSIR0_IOREAD(_priv, _reg)					\
	ioread16((_priv)->sir0_regs + _reg)

#define XSIR0_IOREAD_BITS(_priv, _reg, _field)				\
	GET_BITS(XSIR0_IOREAD((_priv), _reg),				\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH)

#define XSIR0_IOWRITE(_priv, _reg, _val)				\
	iowrite16((_val), (_priv)->sir0_regs + _reg)

#define XSIR0_IOWRITE_BITS(_priv, _reg, _field, _val)			\
do {									\
	u16 reg_val = XSIR0_IOREAD((_priv), _reg);			\
	SET_BITS(reg_val,						\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH, (_val));			\
	XSIR0_IOWRITE((_priv), _reg, reg_val);				\
} while (0)

#define XSIR1_IOREAD(_priv, _reg)					\
	ioread16((_priv)->sir1_regs + _reg)

#define XSIR1_IOREAD_BITS(_priv, _reg, _field)				\
	GET_BITS(XSIR1_IOREAD((_priv), _reg),				\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH)

#define XSIR1_IOWRITE(_priv, _reg, _val)				\
	iowrite16((_val), (_priv)->sir1_regs + _reg)

#define XSIR1_IOWRITE_BITS(_priv, _reg, _field, _val)			\
do {									\
	u16 reg_val = XSIR1_IOREAD((_priv), _reg);			\
	SET_BITS(reg_val,						\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH, (_val));			\
	XSIR1_IOWRITE((_priv), _reg, reg_val);				\
} while (0)


/* Macros for reading or writing SerDes RxTx registers
 * The ioread macros get bit fields or full values using the
 * register definitions formed from the input names
 *
 * The iowrite macros set bit fields or full values using the
 * register definitions formed from the input names
 */
#define XRXTX_IOREAD(_priv, _reg)					\
	ioread16((_priv)->rxtx_regs + _reg)

#define XRXTX_IOREAD_BITS(_priv, _reg, _field)				\
	GET_BITS(XRXTX_IOREAD((_priv), _reg),				\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH)

#define XRXTX_IOWRITE(_priv, _reg, _val)				\
	iowrite16((_val), (_priv)->rxtx_regs + _reg)

#define XRXTX_IOWRITE_BITS(_priv, _reg, _field, _val)			\
do {									\
	u16 reg_val = XRXTX_IOREAD((_priv), _reg);			\
	SET_BITS(reg_val,						\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH, (_val));			\
	XRXTX_IOWRITE((_priv), _reg, reg_val);				\
} while (0)
282
283
/* Auto-negotiation state machine states */
enum amd_xgbe_phy_an {
	AMD_XGBE_AN_READY = 0,
	AMD_XGBE_AN_START,
	AMD_XGBE_AN_EVENT,
	AMD_XGBE_AN_PAGE_RECEIVED,
	AMD_XGBE_AN_INCOMPAT_LINK,
	AMD_XGBE_AN_COMPLETE,
	AMD_XGBE_AN_NO_LINK,
	AMD_XGBE_AN_EXIT,
	AMD_XGBE_AN_ERROR,
};

/* Page-exchange progress, tracked separately for KR and KX */
enum amd_xgbe_phy_rx {
	AMD_XGBE_RX_READY = 0,
	AMD_XGBE_RX_BPA,
	AMD_XGBE_RX_XNP,
	AMD_XGBE_RX_COMPLETE,
};

/* Current operating mode: 10GBase-KR or the lower-speed KX mode */
enum amd_xgbe_phy_mode {
	AMD_XGBE_MODE_KR,
	AMD_XGBE_MODE_KX,
};

/* Supported speed pairs (selected by the amd,speed-set DT property) */
enum amd_xgbe_phy_speedset {
	AMD_XGBE_PHY_SPEEDSET_1000_10000,
	AMD_XGBE_PHY_SPEEDSET_2500_10000,
};
312
Lendacky, Thomas4d874b32014-06-05 09:15:12 -0500313struct amd_xgbe_phy_priv {
314 struct platform_device *pdev;
315 struct device *dev;
316
317 struct phy_device *phydev;
318
319 /* SerDes related mmio resources */
320 struct resource *rxtx_res;
321 struct resource *sir0_res;
322 struct resource *sir1_res;
323
324 /* SerDes related mmio registers */
325 void __iomem *rxtx_regs; /* SerDes Rx/Tx CSRs */
326 void __iomem *sir0_regs; /* SerDes integration registers (1/2) */
327 void __iomem *sir1_regs; /* SerDes integration registers (2/2) */
328
329 /* Maintain link status for re-starting auto-negotiation */
330 unsigned int link;
331 enum amd_xgbe_phy_mode mode;
Lendacky, Thomasf0476042014-07-29 08:57:25 -0500332 unsigned int speed_set;
Lendacky, Thomas4d874b32014-06-05 09:15:12 -0500333
334 /* Auto-negotiation state machine support */
335 struct mutex an_mutex;
336 enum amd_xgbe_phy_an an_result;
337 enum amd_xgbe_phy_an an_state;
338 enum amd_xgbe_phy_rx kr_state;
339 enum amd_xgbe_phy_rx kx_state;
340 struct work_struct an_work;
341 struct workqueue_struct *an_workqueue;
342};
343
344static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
345{
346 int ret;
347
348 ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
349 if (ret < 0)
350 return ret;
351
352 ret |= 0x02;
353 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
354
355 return 0;
356}
357
358static int amd_xgbe_an_disable_kr_training(struct phy_device *phydev)
359{
360 int ret;
361
362 ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
363 if (ret < 0)
364 return ret;
365
366 ret &= ~0x02;
367 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
368
369 return 0;
370}
371
372static int amd_xgbe_phy_pcs_power_cycle(struct phy_device *phydev)
373{
374 int ret;
375
376 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
377 if (ret < 0)
378 return ret;
379
380 ret |= MDIO_CTRL1_LPOWER;
381 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
382
383 usleep_range(75, 100);
384
385 ret &= ~MDIO_CTRL1_LPOWER;
386 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
387
388 return 0;
389}
390
391static void amd_xgbe_phy_serdes_start_ratechange(struct phy_device *phydev)
392{
393 struct amd_xgbe_phy_priv *priv = phydev->priv;
394
395 /* Assert Rx and Tx ratechange */
396 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 1);
397}
398
399static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
400{
401 struct amd_xgbe_phy_priv *priv = phydev->priv;
Lendacky, Thomas169a6302014-07-29 08:57:37 -0500402 unsigned int wait;
403 u16 status;
Lendacky, Thomas4d874b32014-06-05 09:15:12 -0500404
405 /* Release Rx and Tx ratechange */
406 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 0);
407
408 /* Wait for Rx and Tx ready */
Lendacky, Thomas169a6302014-07-29 08:57:37 -0500409 wait = XGBE_PHY_RATECHANGE_COUNT;
410 while (wait--) {
Lendacky, Thomas4d874b32014-06-05 09:15:12 -0500411 usleep_range(10, 20);
Lendacky, Thomas169a6302014-07-29 08:57:37 -0500412
413 status = XSIR0_IOREAD(priv, SIR0_STATUS);
414 if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
415 XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
416 return;
417 }
418
419 netdev_err(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
420 status);
Lendacky, Thomas4d874b32014-06-05 09:15:12 -0500421}
422
423static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
424{
425 struct amd_xgbe_phy_priv *priv = phydev->priv;
426 int ret;
427
428 /* Enable KR training */
429 ret = amd_xgbe_an_enable_kr_training(phydev);
430 if (ret < 0)
431 return ret;
432
433 /* Set PCS to KR/10G speed */
434 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
435 if (ret < 0)
436 return ret;
437
438 ret &= ~MDIO_PCS_CTRL2_TYPE;
439 ret |= MDIO_PCS_CTRL2_10GBR;
440 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
441
442 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
443 if (ret < 0)
444 return ret;
445
446 ret &= ~MDIO_CTRL1_SPEEDSEL;
447 ret |= MDIO_CTRL1_SPEED10G;
448 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
449
450 ret = amd_xgbe_phy_pcs_power_cycle(phydev);
451 if (ret < 0)
452 return ret;
453
454 /* Set SerDes to 10G speed */
455 amd_xgbe_phy_serdes_start_ratechange(phydev);
456
457 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_10000_RATE);
458 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_10000_WORD);
459 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_10000_TXAMP);
460 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_10000_PLL);
461 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_10000_CDR);
462
463 XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_10000_BLWC);
464 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_10000_PQ);
465
466 amd_xgbe_phy_serdes_complete_ratechange(phydev);
467
468 priv->mode = AMD_XGBE_MODE_KR;
469
470 return 0;
471}
472
473static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
474{
475 struct amd_xgbe_phy_priv *priv = phydev->priv;
476 int ret;
477
478 /* Disable KR training */
479 ret = amd_xgbe_an_disable_kr_training(phydev);
480 if (ret < 0)
481 return ret;
482
483 /* Set PCS to KX/1G speed */
484 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
485 if (ret < 0)
486 return ret;
487
488 ret &= ~MDIO_PCS_CTRL2_TYPE;
489 ret |= MDIO_PCS_CTRL2_10GBX;
490 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
491
492 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
493 if (ret < 0)
494 return ret;
495
496 ret &= ~MDIO_CTRL1_SPEEDSEL;
497 ret |= MDIO_CTRL1_SPEED1G;
498 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
499
500 ret = amd_xgbe_phy_pcs_power_cycle(phydev);
501 if (ret < 0)
502 return ret;
503
504 /* Set SerDes to 2.5G speed */
505 amd_xgbe_phy_serdes_start_ratechange(phydev);
506
507 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_2500_RATE);
508 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_2500_WORD);
509 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_2500_TXAMP);
510 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_2500_PLL);
511 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_2500_CDR);
512
513 XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_2500_BLWC);
514 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_2500_PQ);
515
516 amd_xgbe_phy_serdes_complete_ratechange(phydev);
517
518 priv->mode = AMD_XGBE_MODE_KX;
519
520 return 0;
521}
522
523static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
524{
525 struct amd_xgbe_phy_priv *priv = phydev->priv;
526 int ret;
527
528 /* Disable KR training */
529 ret = amd_xgbe_an_disable_kr_training(phydev);
530 if (ret < 0)
531 return ret;
532
533 /* Set PCS to KX/1G speed */
534 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
535 if (ret < 0)
536 return ret;
537
538 ret &= ~MDIO_PCS_CTRL2_TYPE;
539 ret |= MDIO_PCS_CTRL2_10GBX;
540 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
541
542 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
543 if (ret < 0)
544 return ret;
545
546 ret &= ~MDIO_CTRL1_SPEEDSEL;
547 ret |= MDIO_CTRL1_SPEED1G;
548 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
549
550 ret = amd_xgbe_phy_pcs_power_cycle(phydev);
551 if (ret < 0)
552 return ret;
553
554 /* Set SerDes to 1G speed */
555 amd_xgbe_phy_serdes_start_ratechange(phydev);
556
557 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_1000_RATE);
558 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_1000_WORD);
559 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_1000_TXAMP);
560 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_1000_PLL);
561 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_1000_CDR);
562
563 XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_1000_BLWC);
564 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_1000_PQ);
565
566 amd_xgbe_phy_serdes_complete_ratechange(phydev);
567
568 priv->mode = AMD_XGBE_MODE_KX;
569
570 return 0;
571}
572
573static int amd_xgbe_phy_switch_mode(struct phy_device *phydev)
574{
575 struct amd_xgbe_phy_priv *priv = phydev->priv;
576 int ret;
577
578 /* If we are in KR switch to KX, and vice-versa */
Lendacky, Thomasf0476042014-07-29 08:57:25 -0500579 if (priv->mode == AMD_XGBE_MODE_KR) {
580 if (priv->speed_set == AMD_XGBE_PHY_SPEEDSET_1000_10000)
581 ret = amd_xgbe_phy_gmii_mode(phydev);
582 else
583 ret = amd_xgbe_phy_gmii_2500_mode(phydev);
584 } else {
Lendacky, Thomas4d874b32014-06-05 09:15:12 -0500585 ret = amd_xgbe_phy_xgmii_mode(phydev);
Lendacky, Thomasf0476042014-07-29 08:57:25 -0500586 }
Lendacky, Thomas4d874b32014-06-05 09:15:12 -0500587
588 return ret;
589}
590
591static enum amd_xgbe_phy_an amd_xgbe_an_switch_mode(struct phy_device *phydev)
592{
593 int ret;
594
595 ret = amd_xgbe_phy_switch_mode(phydev);
596 if (ret < 0)
597 return AMD_XGBE_AN_ERROR;
598
599 return AMD_XGBE_AN_START;
600}
601
602static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
603 enum amd_xgbe_phy_rx *state)
604{
605 struct amd_xgbe_phy_priv *priv = phydev->priv;
606 int ad_reg, lp_reg, ret;
607
608 *state = AMD_XGBE_RX_COMPLETE;
609
610 /* If we're in KX mode then we're done */
611 if (priv->mode == AMD_XGBE_MODE_KX)
612 return AMD_XGBE_AN_EVENT;
613
614 /* Enable/Disable FEC */
615 ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
616 if (ad_reg < 0)
617 return AMD_XGBE_AN_ERROR;
618
619 lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 2);
620 if (lp_reg < 0)
621 return AMD_XGBE_AN_ERROR;
622
623 ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL);
624 if (ret < 0)
625 return AMD_XGBE_AN_ERROR;
626
627 if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
628 ret |= 0x01;
629 else
630 ret &= ~0x01;
631
632 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL, ret);
633
634 /* Start KR training */
635 ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
636 if (ret < 0)
637 return AMD_XGBE_AN_ERROR;
638
639 ret |= 0x01;
640 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
641
642 return AMD_XGBE_AN_EVENT;
643}
644
645static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
646 enum amd_xgbe_phy_rx *state)
647{
648 u16 msg;
649
650 *state = AMD_XGBE_RX_XNP;
651
652 msg = XNP_MCF_NULL_MESSAGE;
653 msg |= XNP_MP_FORMATTED;
654
655 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
656 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
657 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP, msg);
658
659 return AMD_XGBE_AN_EVENT;
660}
661
662static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
663 enum amd_xgbe_phy_rx *state)
664{
665 struct amd_xgbe_phy_priv *priv = phydev->priv;
666 unsigned int link_support;
667 int ret, ad_reg, lp_reg;
668
669 /* Read Base Ability register 2 first */
670 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
671 if (ret < 0)
672 return AMD_XGBE_AN_ERROR;
673
674 /* Check for a supported mode, otherwise restart in a different one */
675 link_support = (priv->mode == AMD_XGBE_MODE_KR) ? 0x80 : 0x20;
676 if (!(ret & link_support))
677 return amd_xgbe_an_switch_mode(phydev);
678
679 /* Check Extended Next Page support */
680 ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
681 if (ad_reg < 0)
682 return AMD_XGBE_AN_ERROR;
683
684 lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
685 if (lp_reg < 0)
686 return AMD_XGBE_AN_ERROR;
687
688 return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
689 amd_xgbe_an_tx_xnp(phydev, state) :
690 amd_xgbe_an_tx_training(phydev, state);
691}
692
693static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
694 enum amd_xgbe_phy_rx *state)
695{
696 int ad_reg, lp_reg;
697
698 /* Check Extended Next Page support */
699 ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
700 if (ad_reg < 0)
701 return AMD_XGBE_AN_ERROR;
702
703 lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
704 if (lp_reg < 0)
705 return AMD_XGBE_AN_ERROR;
706
707 return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
708 amd_xgbe_an_tx_xnp(phydev, state) :
709 amd_xgbe_an_tx_training(phydev, state);
710}
711
712static enum amd_xgbe_phy_an amd_xgbe_an_start(struct phy_device *phydev)
713{
714 struct amd_xgbe_phy_priv *priv = phydev->priv;
715 int ret;
716
717 /* Be sure we aren't looping trying to negotiate */
718 if (priv->mode == AMD_XGBE_MODE_KR) {
719 if (priv->kr_state != AMD_XGBE_RX_READY)
720 return AMD_XGBE_AN_NO_LINK;
721 priv->kr_state = AMD_XGBE_RX_BPA;
722 } else {
723 if (priv->kx_state != AMD_XGBE_RX_READY)
724 return AMD_XGBE_AN_NO_LINK;
725 priv->kx_state = AMD_XGBE_RX_BPA;
726 }
727
728 /* Set up Advertisement register 3 first */
729 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
730 if (ret < 0)
731 return AMD_XGBE_AN_ERROR;
732
733 if (phydev->supported & SUPPORTED_10000baseR_FEC)
734 ret |= 0xc000;
735 else
736 ret &= ~0xc000;
737
738 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);
739
740 /* Set up Advertisement register 2 next */
741 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
742 if (ret < 0)
743 return AMD_XGBE_AN_ERROR;
744
745 if (phydev->supported & SUPPORTED_10000baseKR_Full)
746 ret |= 0x80;
747 else
748 ret &= ~0x80;
749
Lendacky, Thomasf0476042014-07-29 08:57:25 -0500750 if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
751 (phydev->supported & SUPPORTED_2500baseX_Full))
Lendacky, Thomas4d874b32014-06-05 09:15:12 -0500752 ret |= 0x20;
753 else
754 ret &= ~0x20;
755
756 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);
757
758 /* Set up Advertisement register 1 last */
759 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
760 if (ret < 0)
761 return AMD_XGBE_AN_ERROR;
762
763 if (phydev->supported & SUPPORTED_Pause)
764 ret |= 0x400;
765 else
766 ret &= ~0x400;
767
768 if (phydev->supported & SUPPORTED_Asym_Pause)
769 ret |= 0x800;
770 else
771 ret &= ~0x800;
772
773 /* We don't intend to perform XNP */
774 ret &= ~XNP_NP_EXCHANGE;
775
776 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);
777
778 /* Enable and start auto-negotiation */
779 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
780
781 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
782 if (ret < 0)
783 return AMD_XGBE_AN_ERROR;
784
785 ret |= MDIO_AN_CTRL1_ENABLE;
786 ret |= MDIO_AN_CTRL1_RESTART;
787 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
788
789 return AMD_XGBE_AN_EVENT;
790}
791
792static enum amd_xgbe_phy_an amd_xgbe_an_event(struct phy_device *phydev)
793{
794 enum amd_xgbe_phy_an new_state;
795 int ret;
796
797 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
798 if (ret < 0)
799 return AMD_XGBE_AN_ERROR;
800
801 new_state = AMD_XGBE_AN_EVENT;
802 if (ret & XGBE_AN_PG_RCV)
803 new_state = AMD_XGBE_AN_PAGE_RECEIVED;
804 else if (ret & XGBE_AN_INC_LINK)
805 new_state = AMD_XGBE_AN_INCOMPAT_LINK;
806 else if (ret & XGBE_AN_INT_CMPLT)
807 new_state = AMD_XGBE_AN_COMPLETE;
808
809 if (new_state != AMD_XGBE_AN_EVENT)
810 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
811
812 return new_state;
813}
814
815static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
816{
817 struct amd_xgbe_phy_priv *priv = phydev->priv;
818 enum amd_xgbe_phy_rx *state;
819 int ret;
820
821 state = (priv->mode == AMD_XGBE_MODE_KR) ? &priv->kr_state
822 : &priv->kx_state;
823
824 switch (*state) {
825 case AMD_XGBE_RX_BPA:
826 ret = amd_xgbe_an_rx_bpa(phydev, state);
827 break;
828
829 case AMD_XGBE_RX_XNP:
830 ret = amd_xgbe_an_rx_xnp(phydev, state);
831 break;
832
833 default:
834 ret = AMD_XGBE_AN_ERROR;
835 }
836
837 return ret;
838}
839
840static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
841{
842 return amd_xgbe_an_switch_mode(phydev);
843}
844
845static void amd_xgbe_an_state_machine(struct work_struct *work)
846{
847 struct amd_xgbe_phy_priv *priv = container_of(work,
848 struct amd_xgbe_phy_priv,
849 an_work);
850 struct phy_device *phydev = priv->phydev;
851 enum amd_xgbe_phy_an cur_state;
852 int sleep;
853
854 while (1) {
855 mutex_lock(&priv->an_mutex);
856
857 cur_state = priv->an_state;
858
859 switch (priv->an_state) {
860 case AMD_XGBE_AN_START:
861 priv->an_state = amd_xgbe_an_start(phydev);
862 break;
863
864 case AMD_XGBE_AN_EVENT:
865 priv->an_state = amd_xgbe_an_event(phydev);
866 break;
867
868 case AMD_XGBE_AN_PAGE_RECEIVED:
869 priv->an_state = amd_xgbe_an_page_received(phydev);
870 break;
871
872 case AMD_XGBE_AN_INCOMPAT_LINK:
873 priv->an_state = amd_xgbe_an_incompat_link(phydev);
874 break;
875
876 case AMD_XGBE_AN_COMPLETE:
877 case AMD_XGBE_AN_NO_LINK:
878 case AMD_XGBE_AN_EXIT:
879 goto exit_unlock;
880
881 default:
882 priv->an_state = AMD_XGBE_AN_ERROR;
883 }
884
885 if (priv->an_state == AMD_XGBE_AN_ERROR) {
886 netdev_err(phydev->attached_dev,
887 "error during auto-negotiation, state=%u\n",
888 cur_state);
889 goto exit_unlock;
890 }
891
892 sleep = (priv->an_state == AMD_XGBE_AN_EVENT) ? 1 : 0;
893
894 mutex_unlock(&priv->an_mutex);
895
896 if (sleep)
897 usleep_range(20, 50);
898 }
899
900exit_unlock:
901 priv->an_result = priv->an_state;
902 priv->an_state = AMD_XGBE_AN_READY;
903
904 mutex_unlock(&priv->an_mutex);
905}
906
907static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
908{
909 int count, ret;
910
911 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
912 if (ret < 0)
913 return ret;
914
915 ret |= MDIO_CTRL1_RESET;
916 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
917
918 count = 50;
919 do {
920 msleep(20);
921 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
922 if (ret < 0)
923 return ret;
924 } while ((ret & MDIO_CTRL1_RESET) && --count);
925
926 if (ret & MDIO_CTRL1_RESET)
927 return -ETIMEDOUT;
928
929 return 0;
930}
931
932static int amd_xgbe_phy_config_init(struct phy_device *phydev)
933{
Lendacky, Thomasf0476042014-07-29 08:57:25 -0500934 struct amd_xgbe_phy_priv *priv = phydev->priv;
935
Lendacky, Thomas4d874b32014-06-05 09:15:12 -0500936 /* Initialize supported features */
937 phydev->supported = SUPPORTED_Autoneg;
938 phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
939 phydev->supported |= SUPPORTED_Backplane;
Lendacky, Thomas4d874b32014-06-05 09:15:12 -0500940 phydev->supported |= SUPPORTED_10000baseKR_Full |
941 SUPPORTED_10000baseR_FEC;
Lendacky, Thomasf0476042014-07-29 08:57:25 -0500942 switch (priv->speed_set) {
943 case AMD_XGBE_PHY_SPEEDSET_1000_10000:
944 phydev->supported |= SUPPORTED_1000baseKX_Full;
945 break;
946 case AMD_XGBE_PHY_SPEEDSET_2500_10000:
947 phydev->supported |= SUPPORTED_2500baseX_Full;
948 break;
949 }
Lendacky, Thomas4d874b32014-06-05 09:15:12 -0500950 phydev->advertising = phydev->supported;
951
952 /* Turn off and clear interrupts */
953 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
954 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
955
956 return 0;
957}
958
959static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
960{
961 int ret;
962
963 /* Disable auto-negotiation */
964 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
965 if (ret < 0)
966 return ret;
967
968 ret &= ~MDIO_AN_CTRL1_ENABLE;
969 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
970
971 /* Validate/Set specified speed */
972 switch (phydev->speed) {
973 case SPEED_10000:
974 ret = amd_xgbe_phy_xgmii_mode(phydev);
975 break;
976
977 case SPEED_2500:
978 ret = amd_xgbe_phy_gmii_2500_mode(phydev);
979 break;
980
981 case SPEED_1000:
982 ret = amd_xgbe_phy_gmii_mode(phydev);
983 break;
984
985 default:
986 ret = -EINVAL;
987 }
988
989 if (ret < 0)
990 return ret;
991
992 /* Validate duplex mode */
993 if (phydev->duplex != DUPLEX_FULL)
994 return -EINVAL;
995
996 phydev->pause = 0;
997 phydev->asym_pause = 0;
998
999 return 0;
1000}
1001
1002static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
1003{
1004 struct amd_xgbe_phy_priv *priv = phydev->priv;
1005 u32 mmd_mask = phydev->c45_ids.devices_in_package;
1006 int ret;
1007
1008 if (phydev->autoneg != AUTONEG_ENABLE)
1009 return amd_xgbe_phy_setup_forced(phydev);
1010
1011 /* Make sure we have the AN MMD present */
1012 if (!(mmd_mask & MDIO_DEVS_AN))
1013 return -EINVAL;
1014
1015 /* Get the current speed mode */
1016 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
1017 if (ret < 0)
1018 return ret;
1019
1020 /* Start/Restart the auto-negotiation state machine */
1021 mutex_lock(&priv->an_mutex);
1022 priv->an_result = AMD_XGBE_AN_READY;
1023 priv->an_state = AMD_XGBE_AN_START;
1024 priv->kr_state = AMD_XGBE_RX_READY;
1025 priv->kx_state = AMD_XGBE_RX_READY;
1026 mutex_unlock(&priv->an_mutex);
1027
1028 queue_work(priv->an_workqueue, &priv->an_work);
1029
1030 return 0;
1031}
1032
1033static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
1034{
1035 struct amd_xgbe_phy_priv *priv = phydev->priv;
1036 enum amd_xgbe_phy_an state;
1037
1038 mutex_lock(&priv->an_mutex);
1039 state = priv->an_result;
1040 mutex_unlock(&priv->an_mutex);
1041
1042 return (state == AMD_XGBE_AN_COMPLETE);
1043}
1044
/* Refresh phydev->link from the PCS status register and, when the
 * link transitions back up, kick off auto-negotiation again.
 * Returns 0 or a negative errno from an MDIO access.
 */
static int amd_xgbe_phy_update_link(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	enum amd_xgbe_phy_an state;
	unsigned int check_again, autoneg;
	int ret;

	/* If we're doing auto-negotiation don't report link down */
	mutex_lock(&priv->an_mutex);
	state = priv->an_state;
	mutex_unlock(&priv->an_mutex);

	if (state != AMD_XGBE_AN_READY) {
		phydev->link = 1;
		return 0;
	}

	/* Since the device can be in the wrong mode when a link is
	 * (re-)established (cable connected after the interface is
	 * up, etc.), the link status may report no link. If there
	 * is no link, try switching modes and checking the status
	 * again if auto negotiation is enabled.
	 */
	check_again = (phydev->autoneg == AUTONEG_ENABLE) ? 1 : 0;
again:
	/* Link status is latched low, so read once to clear
	 * and then read again to get current state
	 */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
	if (ret < 0)
		return ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
	if (ret < 0)
		return ret;

	phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0;

	if (!phydev->link) {
		/* One retry only: switch KR/KX mode and re-check */
		if (check_again) {
			ret = amd_xgbe_phy_switch_mode(phydev);
			if (ret < 0)
				return ret;
			check_again = 0;
			goto again;
		}
	}

	/* A down->up transition re-triggers auto-negotiation */
	autoneg = (phydev->link && !priv->link) ? 1 : 0;
	priv->link = phydev->link;
	if (autoneg) {
		/* Link is (back) up, re-start auto-negotiation */
		ret = amd_xgbe_phy_config_aneg(phydev);
		if (ret < 0)
			return ret;
	}

	return 0;
}
1104
/* Update phydev link/speed/duplex/pause state.  When auto-negotiation
 * is enabled and complete, the common capabilities are derived from
 * the intersection of the local advertisement and link partner
 * ability registers, and the PCS is switched to the matching mode if
 * it is not already there.  Returns 0 or a negative errno.
 */
static int amd_xgbe_phy_read_status(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	u32 mmd_mask = phydev->c45_ids.devices_in_package;
	int ret, mode, ad_ret, lp_ret;

	ret = amd_xgbe_phy_update_link(phydev);
	if (ret)
		return ret;

	/* Current PCS type selection (KR vs. KX) */
	mode = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (mode < 0)
		return mode;
	mode &= MDIO_PCS_CTRL2_TYPE;

	if (phydev->autoneg == AUTONEG_ENABLE) {
		if (!(mmd_mask & MDIO_DEVS_AN))
			return -EINVAL;

		/* Nothing to resolve until auto-negotiation finishes */
		if (!amd_xgbe_phy_aneg_done(phydev))
			return 0;

		/* Compare Advertisement and Link Partner register 1 */
		ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
		if (ad_ret < 0)
			return ad_ret;
		lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
		if (lp_ret < 0)
			return lp_ret;

		/* Keep only capabilities both ends advertise.
		 * NOTE(review): 0x400/0x800 are presumably the pause /
		 * asymmetric-pause ability bits of the AN base page —
		 * confirm against the 802.3 clause 73 bit layout.
		 */
		ad_ret &= lp_ret;
		phydev->pause = (ad_ret & 0x400) ? 1 : 0;
		phydev->asym_pause = (ad_ret & 0x800) ? 1 : 0;

		/* Compare Advertisement and Link Partner register 2 */
		ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN,
				      MDIO_AN_ADVERTISE + 1);
		if (ad_ret < 0)
			return ad_ret;
		lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
		if (lp_ret < 0)
			return lp_ret;

		/* NOTE(review): 0x80 appears to be the 10GBASE-KR
		 * ability bit in AN register 2 — confirm.
		 */
		ad_ret &= lp_ret;
		if (ad_ret & 0x80) {
			phydev->speed = SPEED_10000;
			/* Switch the PCS into KR mode if needed */
			if (mode != MDIO_PCS_CTRL2_10GBR) {
				ret = amd_xgbe_phy_xgmii_mode(phydev);
				if (ret < 0)
					return ret;
			}
		} else {
			int (*mode_fcn)(struct phy_device *);

			/* Lower speed depends on the board's speed-set:
			 * 1000/10000 uses GMII, 2500/10000 uses the
			 * 2.5G GMII variant.
			 */
			if (priv->speed_set ==
			    AMD_XGBE_PHY_SPEEDSET_1000_10000) {
				phydev->speed = SPEED_1000;
				mode_fcn = amd_xgbe_phy_gmii_mode;
			} else {
				phydev->speed = SPEED_2500;
				mode_fcn = amd_xgbe_phy_gmii_2500_mode;
			}

			/* Only switch out of KR mode if currently in it */
			if (mode == MDIO_PCS_CTRL2_10GBR) {
				ret = mode_fcn(phydev);
				if (ret < 0)
					return ret;
			}
		}

		phydev->duplex = DUPLEX_FULL;
	} else {
		/* Forced mode: report speed from the current PCS type */
		if (mode == MDIO_PCS_CTRL2_10GBR) {
			phydev->speed = SPEED_10000;
		} else {
			if (priv->speed_set ==
			    AMD_XGBE_PHY_SPEEDSET_1000_10000)
				phydev->speed = SPEED_1000;
			else
				phydev->speed = SPEED_2500;
		}
		phydev->duplex = DUPLEX_FULL;
		phydev->pause = 0;
		phydev->asym_pause = 0;
	}

	return 0;
}
1193
1194static int amd_xgbe_phy_suspend(struct phy_device *phydev)
1195{
1196 int ret;
1197
1198 mutex_lock(&phydev->lock);
1199
1200 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
1201 if (ret < 0)
1202 goto unlock;
1203
1204 ret |= MDIO_CTRL1_LPOWER;
1205 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
1206
1207 ret = 0;
1208
1209unlock:
1210 mutex_unlock(&phydev->lock);
1211
1212 return ret;
1213}
1214
1215static int amd_xgbe_phy_resume(struct phy_device *phydev)
1216{
1217 int ret;
1218
1219 mutex_lock(&phydev->lock);
1220
1221 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
1222 if (ret < 0)
1223 goto unlock;
1224
1225 ret &= ~MDIO_CTRL1_LPOWER;
1226 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
1227
1228 ret = 0;
1229
1230unlock:
1231 mutex_unlock(&phydev->lock);
1232
1233 return ret;
1234}
1235
/* Driver probe: locate the matching platform device via the device
 * tree, map the three SerDes MMIO regions (rxtx, sir0, sir1), read
 * the amd,speed-set property, determine the initial KR/KX mode from
 * the PCS type field, and create the single-threaded workqueue that
 * runs the auto-negotiation state machine.
 * Returns 0 on success or a negative errno; all partially acquired
 * resources are released on the error paths below (which fall
 * through in reverse acquisition order).
 */
static int amd_xgbe_phy_probe(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv;
	struct platform_device *pdev;
	struct device *dev;
	char *wq_name;
	const __be32 *property;
	unsigned int speed_set;
	int ret;

	if (!phydev->dev.of_node)
		return -EINVAL;

	/* Takes a reference on pdev; dropped via of_dev_put below */
	pdev = of_find_device_by_node(phydev->dev.of_node);
	if (!pdev)
		return -EINVAL;
	dev = &pdev->dev;

	/* Per-bus workqueue name; only needed until the workqueue is
	 * created, then freed.
	 */
	wq_name = kasprintf(GFP_KERNEL, "%s-amd-xgbe-phy", phydev->bus->name);
	if (!wq_name) {
		ret = -ENOMEM;
		goto err_pdev;
	}

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto err_name;
	}

	priv->pdev = pdev;
	priv->dev = dev;
	priv->phydev = phydev;

	/* Get the device mmio areas */
	priv->rxtx_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
	if (IS_ERR(priv->rxtx_regs)) {
		dev_err(dev, "rxtx ioremap failed\n");
		ret = PTR_ERR(priv->rxtx_regs);
		goto err_priv;
	}

	priv->sir0_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	priv->sir0_regs = devm_ioremap_resource(dev, priv->sir0_res);
	if (IS_ERR(priv->sir0_regs)) {
		dev_err(dev, "sir0 ioremap failed\n");
		ret = PTR_ERR(priv->sir0_regs);
		goto err_rxtx;
	}

	priv->sir1_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	priv->sir1_regs = devm_ioremap_resource(dev, priv->sir1_res);
	if (IS_ERR(priv->sir1_regs)) {
		dev_err(dev, "sir1 ioremap failed\n");
		ret = PTR_ERR(priv->sir1_regs);
		goto err_sir0;
	}

	/* Get the device speed set property: 0 selects the
	 * 1000/10000 pairing, 1 selects 2500/10000; property absent
	 * defaults to 0.
	 */
	speed_set = 0;
	property = of_get_property(dev->of_node, XGBE_PHY_SPEEDSET_PROPERTY,
				   NULL);
	if (property)
		speed_set = be32_to_cpu(*property);

	switch (speed_set) {
	case 0:
		priv->speed_set = AMD_XGBE_PHY_SPEEDSET_1000_10000;
		break;
	case 1:
		priv->speed_set = AMD_XGBE_PHY_SPEEDSET_2500_10000;
		break;
	default:
		dev_err(dev, "invalid amd,speed-set property\n");
		ret = -EINVAL;
		goto err_sir1;
	}

	/* Assume link up so the first update detects a transition */
	priv->link = 1;

	/* Initial software mode tracks the current PCS type */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		goto err_sir1;
	if ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
		priv->mode = AMD_XGBE_MODE_KR;
	else
		priv->mode = AMD_XGBE_MODE_KX;

	mutex_init(&priv->an_mutex);
	INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);
	priv->an_workqueue = create_singlethread_workqueue(wq_name);
	if (!priv->an_workqueue) {
		ret = -ENOMEM;
		goto err_sir1;
	}

	phydev->priv = priv;

	kfree(wq_name);
	of_dev_put(pdev);

	return 0;

	/* Error unwinding: each label releases one resource and falls
	 * through to release everything acquired before it.
	 */
err_sir1:
	devm_iounmap(dev, priv->sir1_regs);
	devm_release_mem_region(dev, priv->sir1_res->start,
				resource_size(priv->sir1_res));

err_sir0:
	devm_iounmap(dev, priv->sir0_regs);
	devm_release_mem_region(dev, priv->sir0_res->start,
				resource_size(priv->sir0_res));

err_rxtx:
	devm_iounmap(dev, priv->rxtx_regs);
	devm_release_mem_region(dev, priv->rxtx_res->start,
				resource_size(priv->rxtx_res));

err_priv:
	devm_kfree(dev, priv);

err_name:
	kfree(wq_name);

err_pdev:
	of_dev_put(pdev);

	return ret;
}
1366
/* Driver remove: stop the auto-negotiation state machine, tear down
 * its workqueue, then release the MMIO regions and private data in
 * reverse order of acquisition in probe.
 */
static void amd_xgbe_phy_remove(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	struct device *dev = priv->dev;

	/* Stop any in process auto-negotiation */
	mutex_lock(&priv->an_mutex);
	priv->an_state = AMD_XGBE_AN_EXIT;
	mutex_unlock(&priv->an_mutex);

	/* Wait for the work item to observe AMD_XGBE_AN_EXIT and
	 * finish before destroying the workqueue.
	 */
	flush_workqueue(priv->an_workqueue);
	destroy_workqueue(priv->an_workqueue);

	/* Release resources */
	devm_iounmap(dev, priv->sir1_regs);
	devm_release_mem_region(dev, priv->sir1_res->start,
				resource_size(priv->sir1_res));

	devm_iounmap(dev, priv->sir0_regs);
	devm_release_mem_region(dev, priv->sir0_res->start,
				resource_size(priv->sir0_res));

	devm_iounmap(dev, priv->rxtx_regs);
	devm_release_mem_region(dev, priv->rxtx_res->start,
				resource_size(priv->rxtx_res));

	devm_kfree(dev, priv);
}
1395
1396static int amd_xgbe_match_phy_device(struct phy_device *phydev)
1397{
1398 return phydev->c45_ids.device_ids[MDIO_MMD_PCS] == XGBE_PHY_ID;
1399}
1400
/* PHY driver operations table registered with the phylib core */
static struct phy_driver amd_xgbe_phy_driver[] = {
	{
		.phy_id			= XGBE_PHY_ID,
		.phy_id_mask		= XGBE_PHY_MASK,
		.name			= "AMD XGBE PHY",
		.features		= 0,
		.probe			= amd_xgbe_phy_probe,
		.remove			= amd_xgbe_phy_remove,
		.soft_reset		= amd_xgbe_phy_soft_reset,
		.config_init		= amd_xgbe_phy_config_init,
		.suspend		= amd_xgbe_phy_suspend,
		.resume			= amd_xgbe_phy_resume,
		.config_aneg		= amd_xgbe_phy_config_aneg,
		.aneg_done		= amd_xgbe_phy_aneg_done,
		.read_status		= amd_xgbe_phy_read_status,
		.match_phy_device	= amd_xgbe_match_phy_device,
		.driver			= {
			.owner = THIS_MODULE,
		},
	},
};
1422
/* Module init: register the PHY driver table with phylib */
static int __init amd_xgbe_phy_init(void)
{
	return phy_drivers_register(amd_xgbe_phy_driver,
				    ARRAY_SIZE(amd_xgbe_phy_driver));
}
1428
/* Module exit: unregister the PHY driver table */
static void __exit amd_xgbe_phy_exit(void)
{
	phy_drivers_unregister(amd_xgbe_phy_driver,
			       ARRAY_SIZE(amd_xgbe_phy_driver));
}
1434
/* Hook module load/unload into the init/exit routines above */
module_init(amd_xgbe_phy_init);
module_exit(amd_xgbe_phy_exit);
1437
françois romieua25aafa2014-06-07 11:07:48 +02001438static struct mdio_device_id __maybe_unused amd_xgbe_phy_ids[] = {
Lendacky, Thomas4d874b32014-06-05 09:15:12 -05001439 { XGBE_PHY_ID, XGBE_PHY_MASK },
1440 { }
1441};
1442MODULE_DEVICE_TABLE(mdio, amd_xgbe_phy_ids);