blob: 9c3c273f45351555870b0d7eb50288e5784d8f45 [file] [log] [blame]
Sunil Goutham4863dea2015-05-26 19:20:15 -07001/*
2 * Copyright (C) 2015 Cavium, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License
6 * as published by the Free Software Foundation.
7 */
8
David Daney46b903a2015-08-10 17:58:37 -07009#include <linux/acpi.h>
Sunil Goutham4863dea2015-05-26 19:20:15 -070010#include <linux/module.h>
11#include <linux/interrupt.h>
12#include <linux/pci.h>
13#include <linux/netdevice.h>
14#include <linux/etherdevice.h>
15#include <linux/phy.h>
16#include <linux/of.h>
17#include <linux/of_mdio.h>
18#include <linux/of_net.h>
19
20#include "nic_reg.h"
21#include "nic.h"
22#include "thunder_bgx.h"
23
24#define DRV_NAME "thunder-BGX"
25#define DRV_VERSION "1.0"
26
/* Per-LMAC (logical MAC) state within one BGX block */
struct lmac {
	struct bgx		*bgx;		/* parent BGX instance */
	int			dmac;		/* count of DMAC CAM entries in use */
	u8			mac[ETH_ALEN];	/* MAC address of this LMAC */
	u8			lmac_type;	/* BGX_MODE_* interface type */
	u8			lane_to_sds;	/* lane-to-serdes mapping value */
	bool			use_training;	/* link training enabled (KR modes) */
	bool			link_up;	/* current link state */
	int			lmacid;		/* ID within BGX */
	int			lmacid_bd;	/* ID on board */
	struct net_device	netdev;		/* embedded netdev, used for phylib callback */
	struct phy_device	*phydev;	/* attached PHY (SGMII-family modes only) */
	unsigned int		last_duplex;	/* last observed duplex */
	unsigned int		last_link;	/* last observed link state */
	unsigned int		last_speed;	/* last observed speed (Mbps) */
	bool			is_sgmii;	/* SGMII mode vs XAUI-family mode */
	struct delayed_work	dwork;		/* periodic link-poll work */
	struct workqueue_struct *check_link;	/* workqueue running dwork (non-PHY modes) */
};
Sunil Goutham4863dea2015-05-26 19:20:15 -070046
/* Per-BGX hardware block state */
struct bgx {
	u8			bgx_id;		/* BGX block number */
	struct lmac		lmac[MAX_LMAC_PER_BGX];	/* per-LMAC state */
	int			lmac_count;	/* LMACs enabled by firmware */
	void __iomem		*reg_base;	/* mapped CSR base address */
	struct pci_dev		*pdev;		/* owning PCI device */
	bool			is_81xx;	/* CN81xx: BGX may span two DLMs */
};
Sunil Goutham4863dea2015-05-26 19:20:15 -070055
Aleksey Makarovfd7ec062015-06-02 11:00:23 -070056static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
Sunil Goutham4863dea2015-05-26 19:20:15 -070057static int lmac_count; /* Total no of LMACs in system */
58
59static int bgx_xaui_check_link(struct lmac *lmac);
60
61/* Supported devices */
62static const struct pci_device_id bgx_id_table[] = {
63 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
64 { 0, } /* end of table */
65};
66
67MODULE_AUTHOR("Cavium Inc");
68MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver");
69MODULE_LICENSE("GPL v2");
70MODULE_VERSION(DRV_VERSION);
71MODULE_DEVICE_TABLE(pci, bgx_id_table);
72
73/* The Cavium ThunderX network controller can *only* be found in SoCs
74 * containing the ThunderX ARM64 CPU implementation. All accesses to the device
75 * registers on this platform are implicitly strongly ordered with respect
76 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
77 * with no memory barriers in this driver. The readq()/writeq() functions add
78 * explicit ordering operation which in this case are redundant, and only
79 * add overhead.
80 */
81
82/* Register read/write APIs */
83static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset)
84{
85 void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
86
87 return readq_relaxed(addr);
88}
89
90static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
91{
92 void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
93
94 writeq_relaxed(val, addr);
95}
96
97static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
98{
99 void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
100
101 writeq_relaxed(val | readq_relaxed(addr), addr);
102}
103
104static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
105{
106 int timeout = 100;
107 u64 reg_val;
108
109 while (timeout) {
110 reg_val = bgx_reg_read(bgx, lmac, reg);
111 if (zero && !(reg_val & mask))
112 return 0;
113 if (!zero && (reg_val & mask))
114 return 0;
115 usleep_range(1000, 2000);
116 timeout--;
117 }
118 return 1;
119}
120
121/* Return number of BGX present in HW */
122unsigned bgx_get_map(int node)
123{
124 int i;
125 unsigned map = 0;
126
127 for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
128 if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i])
129 map |= (1 << i);
130 }
131
132 return map;
133}
134EXPORT_SYMBOL(bgx_get_map);
135
136/* Return number of LMAC configured for this BGX */
137int bgx_get_lmac_count(int node, int bgx_idx)
138{
139 struct bgx *bgx;
140
141 bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
142 if (bgx)
143 return bgx->lmac_count;
144
145 return 0;
146}
147EXPORT_SYMBOL(bgx_get_lmac_count);
148
149/* Returns the current link status of LMAC */
150void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
151{
152 struct bgx_link_status *link = (struct bgx_link_status *)status;
153 struct bgx *bgx;
154 struct lmac *lmac;
155
156 bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
157 if (!bgx)
158 return;
159
160 lmac = &bgx->lmac[lmacid];
161 link->link_up = lmac->link_up;
162 link->duplex = lmac->last_duplex;
163 link->speed = lmac->last_speed;
164}
165EXPORT_SYMBOL(bgx_get_lmac_link_state);
166
Aleksey Makarove610cb32015-06-02 11:00:21 -0700167const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
Sunil Goutham4863dea2015-05-26 19:20:15 -0700168{
169 struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
170
171 if (bgx)
172 return bgx->lmac[lmacid].mac;
173
174 return NULL;
175}
176EXPORT_SYMBOL(bgx_get_lmac_mac);
177
Aleksey Makarove610cb32015-06-02 11:00:21 -0700178void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
Sunil Goutham4863dea2015-05-26 19:20:15 -0700179{
180 struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
181
182 if (!bgx)
183 return;
184
185 ether_addr_copy(bgx->lmac[lmacid].mac, mac);
186}
187EXPORT_SYMBOL(bgx_set_lmac_mac);
188
Sunil Gouthambc69fdf2015-12-02 15:36:17 +0530189void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
190{
191 struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
192 u64 cfg;
193
194 if (!bgx)
195 return;
196
197 cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
198 if (enable)
199 cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
200 else
201 cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
202 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
203}
204EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
205
/* Reprogram GMI port config and PCS misc control to match the LMAC's current
 * link state (link_up/last_speed/last_duplex). The LMAC is disabled (CMR_EN
 * cleared) for the duration of the update and re-enabled at the end.
 */
static void bgx_sgmii_change_link_state(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	u64 cmr_cfg;
	u64 port_cfg = 0;
	u64 misc_ctl = 0;

	/* Disable the LMAC before touching port/PCS config */
	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
	cmr_cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);

	if (lmac->link_up) {
		/* Link up: un-gate GMX and program duplex (bit 2 of port cfg) */
		misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
		port_cfg &= ~GMI_PORT_CFG_DUPLEX;
		port_cfg |= (lmac->last_duplex << 2);
	} else {
		/* Link down: gate off GMX */
		misc_ctl |= PCS_MISC_CTL_GMX_ENO;
	}

	/* Per-speed settings: speed/speed_msb/slottime bits, PCS sample point,
	 * and TX slot/burst values. Unknown speeds leave config untouched.
	 */
	switch (lmac->last_speed) {
	case 10:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg |= GMI_PORT_CFG_SPEED_MSB;  /* speed_msb 1 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 50; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 100:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 5; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 1000:
		port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 1; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
		/* Burst only used for gigabit half-duplex */
		if (lmac->last_duplex)
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 0);
		else
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 8192);
		break;
	default:
		break;
	}
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);

	/* Read back port config (presumably to flush the posted write —
	 * TODO confirm against the HRM)
	 */
	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);

	/* renable lmac */
	cmr_cfg |= CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
}
273
Aleksey Makarovfd7ec062015-06-02 11:00:23 -0700274static void bgx_lmac_handler(struct net_device *netdev)
Sunil Goutham4863dea2015-05-26 19:20:15 -0700275{
276 struct lmac *lmac = container_of(netdev, struct lmac, netdev);
xypron.glpk@gmx.de099a7282016-05-17 21:40:38 +0200277 struct phy_device *phydev;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700278 int link_changed = 0;
279
280 if (!lmac)
281 return;
282
xypron.glpk@gmx.de099a7282016-05-17 21:40:38 +0200283 phydev = lmac->phydev;
284
Sunil Goutham4863dea2015-05-26 19:20:15 -0700285 if (!phydev->link && lmac->last_link)
286 link_changed = -1;
287
288 if (phydev->link &&
289 (lmac->last_duplex != phydev->duplex ||
290 lmac->last_link != phydev->link ||
291 lmac->last_speed != phydev->speed)) {
292 link_changed = 1;
293 }
294
295 lmac->last_link = phydev->link;
296 lmac->last_speed = phydev->speed;
297 lmac->last_duplex = phydev->duplex;
298
299 if (!link_changed)
300 return;
301
302 if (link_changed > 0)
303 lmac->link_up = true;
304 else
305 lmac->link_up = false;
306
307 if (lmac->is_sgmii)
308 bgx_sgmii_change_link_state(lmac);
309 else
310 bgx_xaui_check_link(lmac);
311}
312
313u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
314{
315 struct bgx *bgx;
316
317 bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
318 if (!bgx)
319 return 0;
320
321 if (idx > 8)
322 lmac = 0;
323 return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8));
324}
325EXPORT_SYMBOL(bgx_get_rx_stats);
326
327u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
328{
329 struct bgx *bgx;
330
331 bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
332 if (!bgx)
333 return 0;
334
335 return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8));
336}
337EXPORT_SYMBOL(bgx_get_tx_stats);
338
339static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
340{
341 u64 offset;
342
343 while (bgx->lmac[lmac].dmac > 0) {
344 offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) +
345 (lmac * MAX_DMAC_PER_LMAC * sizeof(u64));
346 bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
347 bgx->lmac[lmac].dmac--;
348 }
349}
350
Sunil Gouthamd77a2382015-08-30 12:29:16 +0300351/* Configure BGX LMAC in internal loopback mode */
352void bgx_lmac_internal_loopback(int node, int bgx_idx,
353 int lmac_idx, bool enable)
354{
355 struct bgx *bgx;
356 struct lmac *lmac;
357 u64 cfg;
358
359 bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
360 if (!bgx)
361 return;
362
363 lmac = &bgx->lmac[lmac_idx];
364 if (lmac->is_sgmii) {
365 cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
366 if (enable)
367 cfg |= PCS_MRX_CTL_LOOPBACK1;
368 else
369 cfg &= ~PCS_MRX_CTL_LOOPBACK1;
370 bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
371 } else {
372 cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
373 if (enable)
374 cfg |= SPU_CTL_LOOPBACK;
375 else
376 cfg &= ~SPU_CTL_LOOPBACK;
377 bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
378 }
379}
380EXPORT_SYMBOL(bgx_lmac_internal_loopback);
381
/* Bring up an LMAC in SGMII mode: program GMI thresholds, reset the PCS and
 * kick off autonegotiation.
 * Returns 0 on success, -1 if PCS reset or autoneg does not complete.
 */
static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
{
	u64 cfg;

	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

	/* Disable frame alignment if using preamble */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
	if (cfg & 1)
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* PCS reset (self-clearing; poll until it drops) */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
			 PCS_MRX_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");
		return -1;
	}

	/* power down, reset autoneg, autoneg enable */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
	cfg &= ~PCS_MRX_CTL_PWR_DN;
	cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);

	/* Wait for autoneg to complete */
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
			 PCS_MRX_STATUS_AN_CPT, false)) {
		dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
		return -1;
	}

	return 0;
}
420
/* Bring up an LMAC in one of the XAUI-family modes (XAUI/RXAUI/XFI/XLAUI/
 * 10G-KR/40G-KR): reset the SPU, program SMU/SPU config, optionally enable
 * link training, and take the SPU out of low-power.
 * Returns 0 on success, -1 if the SPU reset does not complete.
 */
static int bgx_lmac_xaui_init(struct bgx *bgx, struct lmac *lmac)
{
	u64 cfg;
	int lmacid = lmac->lmacid;

	/* Reset SPU (self-clearing; poll until it drops) */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
		return -1;
	}

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	/* Set interleaved running disparity for RXAUI */
	if (lmac->lmac_type != BGX_MODE_RXAUI)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	else
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
			       SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);

	/* clear all interrupts (write-back of the read value clears W1C bits) */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

	if (lmac->use_training) {
		/* Clear training coefficient/report registers before enable */
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
		/* training enable */
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
	}

	/* Append FCS to each packet */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);

	/* Disable forward error correction */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
	cfg &= ~SPU_FEC_CTL_FEC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);

	/* Disable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);

	/* Advertise only the configured KR ability (bits 23/24 — presumably
	 * 10GBase-KR / 40GBase-KR4 ability bits; confirm against the HRM)
	 */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
	if (lmac->lmac_type == BGX_MODE_10G_KR)
		cfg |= (1 << 23);
	else if (lmac->lmac_type == BGX_MODE_40G_KR)
		cfg |= (1 << 24);
	else
		cfg &= ~((1 << 23) | (1 << 24));
	cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12)));
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);

	/* BGX_SPU_DBG_CONTROL is shared — note this is written via LMAC 0 */
	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
	cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* Take SPU out of low-power */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
	cfg &= ~SPU_CTL_LOW_POWER;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
	cfg &= ~SMU_TX_CTL_UNI_EN;
	cfg |= SMU_TX_CTL_DIC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);

	/* take lmac_count into account */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);

	return 0;
}
510
511static int bgx_xaui_check_link(struct lmac *lmac)
512{
513 struct bgx *bgx = lmac->bgx;
514 int lmacid = lmac->lmacid;
Sunil Goutham0bcb7d52016-08-12 16:51:30 +0530515 int lmac_type = lmac->lmac_type;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700516 u64 cfg;
517
518 bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
Sunil Goutham0bcb7d52016-08-12 16:51:30 +0530519 if (lmac->use_training) {
Sunil Goutham4863dea2015-05-26 19:20:15 -0700520 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
521 if (!(cfg & (1ull << 13))) {
522 cfg = (1ull << 13) | (1ull << 14);
523 bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
524 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
525 cfg |= (1ull << 0);
526 bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
527 return -1;
528 }
529 }
530
531 /* wait for PCS to come out of reset */
532 if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
533 dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
534 return -1;
535 }
536
537 if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
538 (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
539 if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
540 SPU_BR_STATUS_BLK_LOCK, false)) {
541 dev_err(&bgx->pdev->dev,
542 "SPU_BR_STATUS_BLK_LOCK not completed\n");
543 return -1;
544 }
545 } else {
546 if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
547 SPU_BX_STATUS_RX_ALIGN, false)) {
548 dev_err(&bgx->pdev->dev,
549 "SPU_BX_STATUS_RX_ALIGN not completed\n");
550 return -1;
551 }
552 }
553
554 /* Clear rcvflt bit (latching high) and read it back */
Sunil Goutham3f4c68c2016-06-27 15:30:02 +0530555 if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
556 bgx_reg_modify(bgx, lmacid,
557 BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
Sunil Goutham4863dea2015-05-26 19:20:15 -0700558 if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
559 dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
Sunil Goutham0bcb7d52016-08-12 16:51:30 +0530560 if (lmac->use_training) {
Sunil Goutham4863dea2015-05-26 19:20:15 -0700561 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
562 if (!(cfg & (1ull << 13))) {
563 cfg = (1ull << 13) | (1ull << 14);
564 bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
565 cfg = bgx_reg_read(bgx, lmacid,
566 BGX_SPUX_BR_PMD_CRTL);
567 cfg |= (1ull << 0);
568 bgx_reg_write(bgx, lmacid,
569 BGX_SPUX_BR_PMD_CRTL, cfg);
570 return -1;
571 }
572 }
573 return -1;
574 }
575
Sunil Goutham4863dea2015-05-26 19:20:15 -0700576 /* Wait for BGX RX to be idle */
577 if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
578 dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
579 return -1;
580 }
581
582 /* Wait for BGX TX to be idle */
583 if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
584 dev_err(&bgx->pdev->dev, "SMU TX not idle\n");
585 return -1;
586 }
587
Sunil Goutham3f4c68c2016-06-27 15:30:02 +0530588 /* Clear receive packet disable */
Sunil Goutham4863dea2015-05-26 19:20:15 -0700589 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
590 cfg &= ~SPU_MISC_CTL_RX_DIS;
591 bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
Sunil Goutham3f4c68c2016-06-27 15:30:02 +0530592
593 /* Check for MAC RX faults */
594 cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
595 /* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
596 cfg &= SMU_RX_CTL_STATUS;
597 if (!cfg)
598 return 0;
599
600 /* Rx local/remote fault seen.
601 * Do lmac reinit to see if condition recovers
602 */
Sunil Goutham0bcb7d52016-08-12 16:51:30 +0530603 bgx_lmac_xaui_init(bgx, lmac);
Sunil Goutham3f4c68c2016-06-27 15:30:02 +0530604
605 return -1;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700606}
607
/* Periodic link poll for XAUI-family modes (no PHY attached). Runs on the
 * per-LMAC check_link workqueue and re-queues itself every 2 seconds.
 */
static void bgx_poll_for_link(struct work_struct *work)
{
	struct lmac *lmac;
	u64 spu_link, smu_link;

	lmac = container_of(work, struct lmac, dwork.work);

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(lmac->bgx, lmac->lmacid,
		       BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
		     SPU_STATUS1_RCV_LNK, false);

	spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
	smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);

	/* Link is good only if SPU reports receive link AND SMU reports no
	 * local/remote fault.
	 */
	if ((spu_link & SPU_STATUS1_RCV_LNK) &&
	    !(smu_link & SMU_RX_CTL_STATUS)) {
		lmac->link_up = 1;
		/* Speed is fixed by mode: 40G for XLAUI, else 10G */
		if (lmac->lmac_type == BGX_MODE_XLAUI)
			lmac->last_speed = 40000;
		else
			lmac->last_speed = 10000;
		lmac->last_duplex = 1;
	} else {
		lmac->link_up = 0;
		lmac->last_speed = SPEED_UNKNOWN;
		lmac->last_duplex = DUPLEX_UNKNOWN;
	}

	if (lmac->last_link != lmac->link_up) {
		if (lmac->link_up) {
			/* Confirm the link is really usable before reporting up */
			if (bgx_xaui_check_link(lmac)) {
				/* Errors, clear link_up state */
				lmac->link_up = 0;
				lmac->last_speed = SPEED_UNKNOWN;
				lmac->last_duplex = DUPLEX_UNKNOWN;
			}
		}
		lmac->last_link = lmac->link_up;
	}

	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
}
652
/* Bring up one LMAC: run the mode-specific init (SGMII or XAUI-family),
 * program FCS/PAD appending and minimum packet size, enable the LMAC, then
 * either attach the PHY (SGMII-family) or start the link-poll workqueue
 * (XFI/XLAUI/KR modes, which have no PHY).
 * Returns 0 on success, -1/-ENODEV/-ENOMEM on failure.
 */
static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
{
	struct lmac *lmac;
	u64 cfg;

	lmac = &bgx->lmac[lmacid];
	lmac->bgx = bgx;

	if (lmac->lmac_type == BGX_MODE_SGMII) {
		lmac->is_sgmii = 1;
		if (bgx_lmac_sgmii_init(bgx, lmacid))
			return -1;
	} else {
		lmac->is_sgmii = 0;
		if (bgx_lmac_xaui_init(bgx, lmac))
			return -1;
	}

	/* Enable FCS/PAD append and set the minimum packet size; register set
	 * depends on mode (GMI for SGMII, SMU otherwise).
	 */
	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
	} else {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
	}

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* Restore default cfg, incase low level firmware changed it */
	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);

	if ((lmac->lmac_type != BGX_MODE_XFI) &&
	    (lmac->lmac_type != BGX_MODE_XLAUI) &&
	    (lmac->lmac_type != BGX_MODE_40G_KR) &&
	    (lmac->lmac_type != BGX_MODE_10G_KR)) {
		/* PHY-managed modes: attach phylib to the embedded netdev */
		if (!lmac->phydev)
			return -ENODEV;

		lmac->phydev->dev_flags = 0;

		if (phy_connect_direct(&lmac->netdev, lmac->phydev,
				       bgx_lmac_handler,
				       PHY_INTERFACE_MODE_SGMII))
			return -ENODEV;

		phy_start_aneg(lmac->phydev);
	} else {
		/* PHY-less modes: poll link state from a dedicated workqueue */
		lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
						   WQ_MEM_RECLAIM, 1);
		if (!lmac->check_link)
			return -ENOMEM;
		INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
		queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
	}

	return 0;
}
715
/* Tear down one LMAC: stop link polling, disable Rx, drain the FIFOs,
 * disable Tx, power down the serdes, disable the LMAC, flush DMAC filters
 * and detach the PHY if one was connected. The ordering (Rx off -> drain ->
 * Tx off -> disable) is deliberate so no packets are cut mid-flight.
 */
static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
{
	struct lmac *lmac;
	u64 cfg;

	lmac = &bgx->lmac[lmacid];
	if (lmac->check_link) {
		/* Destroy work queue */
		cancel_delayed_work_sync(&lmac->dwork);
		destroy_workqueue(lmac->check_link);
	}

	/* Disable packet reception */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_PKT_RX_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	/* Give chance for Rx/Tx FIFO to get drained */
	bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
	bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);

	/* Disable packet transmission */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_PKT_TX_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	/* Disable serdes lanes */
	if (!lmac->is_sgmii)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	else
		bgx_reg_modify(bgx, lmacid,
			       BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_flush_dmac_addrs(bgx, lmacid);

	/* PHY is only connected for non-XFI/XLAUI/KR modes (see enable path) */
	if ((lmac->lmac_type != BGX_MODE_XFI) &&
	    (lmac->lmac_type != BGX_MODE_XLAUI) &&
	    (lmac->lmac_type != BGX_MODE_40G_KR) &&
	    (lmac->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
		phy_disconnect(lmac->phydev);

	lmac->phydev = NULL;
}
765
/* One-time BGX block init: enable FCS stripping, check BIST, program each
 * LMAC's type/lane mapping, set LMAC counts, configure backpressure masks
 * and clear all DMAC filtering and NCSI steering rules.
 */
static void bgx_init_hw(struct bgx *bgx)
{
	int i;
	struct lmac *lmac;

	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
	/* Non-zero BIST status indicates a memory self-test failure */
	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
		dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id);

	/* Set lmac type and lane2serdes mapping */
	for (i = 0; i < bgx->lmac_count; i++) {
		lmac = &bgx->lmac[i];
		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
			      (lmac->lmac_type << 8) | lmac->lane_to_sds);
		/* Assign a board-wide LMAC id from the global counter */
		bgx->lmac[i].lmacid_bd = lmac_count;
		lmac_count++;
	}

	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);

	/* Set the backpressure AND mask */
	for (i = 0; i < bgx->lmac_count; i++)
		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
			       ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
			       (i * MAX_BGX_CHANS_PER_LMAC));

	/* Disable all MAC filtering */
	for (i = 0; i < RX_DMAC_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

	/* Disable MAC steering (NCSI traffic) */
	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
}
801
Sunil Goutham0bcb7d52016-08-12 16:51:30 +0530802static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
Sunil Goutham4863dea2015-05-26 19:20:15 -0700803{
804 struct device *dev = &bgx->pdev->dev;
Sunil Goutham0bcb7d52016-08-12 16:51:30 +0530805 struct lmac *lmac;
806 char str[20];
Sunil Goutham57aaf632016-08-12 16:51:31 +0530807 u8 dlm;
808
809 if (lmacid > MAX_LMAC_PER_BGX)
810 return;
Sunil Goutham0bcb7d52016-08-12 16:51:30 +0530811
812 lmac = &bgx->lmac[lmacid];
Sunil Goutham57aaf632016-08-12 16:51:31 +0530813 dlm = (lmacid / 2) + (bgx->bgx_id * 2);
814 if (!bgx->is_81xx)
815 sprintf(str, "BGX%d QLM mode", bgx->bgx_id);
816 else
817 sprintf(str, "BGX%d DLM%d mode", bgx->bgx_id, dlm);
Sunil Goutham0bcb7d52016-08-12 16:51:30 +0530818
819 switch (lmac->lmac_type) {
820 case BGX_MODE_SGMII:
821 dev_info(dev, "%s: SGMII\n", (char *)str);
822 break;
823 case BGX_MODE_XAUI:
824 dev_info(dev, "%s: XAUI\n", (char *)str);
825 break;
826 case BGX_MODE_RXAUI:
827 dev_info(dev, "%s: RXAUI\n", (char *)str);
828 break;
829 case BGX_MODE_XFI:
830 if (!lmac->use_training)
831 dev_info(dev, "%s: XFI\n", (char *)str);
832 else
833 dev_info(dev, "%s: 10G_KR\n", (char *)str);
834 break;
835 case BGX_MODE_XLAUI:
836 if (!lmac->use_training)
837 dev_info(dev, "%s: XLAUI\n", (char *)str);
838 else
839 dev_info(dev, "%s: 40G_KR4\n", (char *)str);
840 break;
841 default:
842 dev_info(dev, "%s: INVALID\n", (char *)str);
843 }
844}
845
846static void lmac_set_lane2sds(struct lmac *lmac)
847{
848 switch (lmac->lmac_type) {
849 case BGX_MODE_SGMII:
850 case BGX_MODE_XFI:
851 lmac->lane_to_sds = lmac->lmacid;
852 break;
853 case BGX_MODE_XAUI:
854 case BGX_MODE_XLAUI:
855 lmac->lane_to_sds = 0xE4;
856 break;
857 case BGX_MODE_RXAUI:
858 lmac->lane_to_sds = (lmac->lmacid) ? 0xE : 0x4;
859 break;
860 default:
861 lmac->lane_to_sds = 0;
862 break;
863 }
864}
865
/* Read back the firmware-programmed interface configuration for one LMAC.
 * On 88xx all LMACs of a BGX share LMAC0's config; on 81xx the firmware
 * programs LMAC0 and LMAC2 (one per DLM) and the odd LMAC of each DLM
 * inherits from its even sibling.
 */
static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
{
	struct lmac *lmac;
	struct lmac *olmac;
	u64 cmr_cfg;
	u8 lmac_type;
	u8 lane_to_sds;

	lmac = &bgx->lmac[idx];

	if (!bgx->is_81xx) {
		/* Read LMAC0 type to figure out QLM mode
		 * This is configured by low level firmware
		 */
		cmr_cfg = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
		lmac->lmac_type = (cmr_cfg >> 8) & 0x07;
		lmac->use_training =
			bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
				SPU_PMD_CRTL_TRAIN_EN;
		lmac_set_lane2sds(lmac);
		return;
	}

	/* On 81xx BGX can be split across 2 DLMs
	 * firmware programs lmac_type of LMAC0 and LMAC2
	 */
	if ((idx == 0) || (idx == 2)) {
		cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
		lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
		lane_to_sds = (u8)(cmr_cfg & 0xFF);
		/* Check if config is not reset value */
		if ((lmac_type == 0) && (lane_to_sds == 0xE4))
			lmac->lmac_type = BGX_MODE_INVALID;
		else
			lmac->lmac_type = lmac_type;
		lmac->use_training =
			bgx_reg_read(bgx, idx, BGX_SPUX_BR_PMD_CRTL) &
				SPU_PMD_CRTL_TRAIN_EN;
		lmac_set_lane2sds(lmac);

		/* Set LMAC type of other lmac on same DLM i.e LMAC 1/3 */
		olmac = &bgx->lmac[idx + 1];
		olmac->lmac_type = lmac->lmac_type;
		olmac->use_training =
			bgx_reg_read(bgx, idx + 1, BGX_SPUX_BR_PMD_CRTL) &
				SPU_PMD_CRTL_TRAIN_EN;
		lmac_set_lane2sds(olmac);
	}
}
915
916static bool is_dlm0_in_bgx_mode(struct bgx *bgx)
917{
918 struct lmac *lmac;
919
920 if (!bgx->is_81xx)
921 return true;
922
923 lmac = &bgx->lmac[1];
924 if (lmac->lmac_type == BGX_MODE_INVALID)
925 return false;
926
927 return true;
Sunil Goutham0bcb7d52016-08-12 16:51:30 +0530928}
Sunil Goutham4863dea2015-05-26 19:20:15 -0700929
Sunil Goutham0bcb7d52016-08-12 16:51:30 +0530930static void bgx_get_qlm_mode(struct bgx *bgx)
931{
Sunil Goutham57aaf632016-08-12 16:51:31 +0530932 struct lmac *lmac;
933 struct lmac *lmac01;
934 struct lmac *lmac23;
Sunil Goutham0bcb7d52016-08-12 16:51:30 +0530935 u8 idx;
Sunil Goutham4863dea2015-05-26 19:20:15 -0700936
Sunil Goutham57aaf632016-08-12 16:51:31 +0530937 /* Init all LMAC's type to invalid */
938 for (idx = 0; idx < MAX_LMAC_PER_BGX; idx++) {
939 lmac = &bgx->lmac[idx];
940 lmac->lmac_type = BGX_MODE_INVALID;
941 lmac->lmacid = idx;
942 }
943
Sunil Goutham0bcb7d52016-08-12 16:51:30 +0530944 /* It is assumed that low level firmware sets this value */
945 bgx->lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
946 if (bgx->lmac_count > MAX_LMAC_PER_BGX)
947 bgx->lmac_count = MAX_LMAC_PER_BGX;
948
949 for (idx = 0; idx < bgx->lmac_count; idx++)
950 bgx_set_lmac_config(bgx, idx);
Sunil Goutham57aaf632016-08-12 16:51:31 +0530951
952 if (!bgx->is_81xx) {
953 bgx_print_qlm_mode(bgx, 0);
954 return;
955 }
956
957 if (bgx->lmac_count) {
958 bgx_print_qlm_mode(bgx, 0);
959 bgx_print_qlm_mode(bgx, 2);
960 }
961
962 /* If DLM0 is not in BGX mode then LMAC0/1 have
963 * to be configured with serdes lanes of DLM1
964 */
965 if (is_dlm0_in_bgx_mode(bgx) || (bgx->lmac_count > 2))
966 return;
967 for (idx = 0; idx < bgx->lmac_count; idx++) {
968 lmac01 = &bgx->lmac[idx];
969 lmac23 = &bgx->lmac[idx + 2];
970 lmac01->lmac_type = lmac23->lmac_type;
971 lmac01->lane_to_sds = lmac23->lane_to_sds;
972 }
Sunil Goutham4863dea2015-05-26 19:20:15 -0700973}
974
David Daney46b903a2015-08-10 17:58:37 -0700975#ifdef CONFIG_ACPI
976
Robert Richter1d82efa2016-02-11 21:50:25 +0530977static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev,
978 u8 *dst)
David Daney46b903a2015-08-10 17:58:37 -0700979{
980 u8 mac[ETH_ALEN];
981 int ret;
982
983 ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
984 "mac-address", mac, ETH_ALEN);
985 if (ret)
986 goto out;
987
988 if (!is_valid_ether_addr(mac)) {
Robert Richter1d82efa2016-02-11 21:50:25 +0530989 dev_err(dev, "MAC address invalid: %pM\n", mac);
David Daney46b903a2015-08-10 17:58:37 -0700990 ret = -EINVAL;
991 goto out;
992 }
993
Robert Richter1d82efa2016-02-11 21:50:25 +0530994 dev_info(dev, "MAC address set to: %pM\n", mac);
995
David Daney46b903a2015-08-10 17:58:37 -0700996 memcpy(dst, mac, ETH_ALEN);
997out:
998 return ret;
999}
1000
1001/* Currently only sets the MAC address. */
1002static acpi_status bgx_acpi_register_phy(acpi_handle handle,
1003 u32 lvl, void *context, void **rv)
1004{
1005 struct bgx *bgx = context;
Robert Richter1d82efa2016-02-11 21:50:25 +05301006 struct device *dev = &bgx->pdev->dev;
David Daney46b903a2015-08-10 17:58:37 -07001007 struct acpi_device *adev;
1008
1009 if (acpi_bus_get_device(handle, &adev))
1010 goto out;
1011
Robert Richter1d82efa2016-02-11 21:50:25 +05301012 acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac);
David Daney46b903a2015-08-10 17:58:37 -07001013
Robert Richter1d82efa2016-02-11 21:50:25 +05301014 SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev);
David Daney46b903a2015-08-10 17:58:37 -07001015
1016 bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
1017out:
1018 bgx->lmac_count++;
1019 return AE_OK;
1020}
1021
1022static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
1023 void *context, void **ret_val)
1024{
1025 struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
1026 struct bgx *bgx = context;
1027 char bgx_sel[5];
1028
1029 snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
1030 if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
1031 pr_warn("Invalid link device\n");
1032 return AE_OK;
1033 }
1034
1035 if (strncmp(string.pointer, bgx_sel, 4))
1036 return AE_OK;
1037
1038 acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
1039 bgx_acpi_register_phy, NULL, bgx, NULL);
1040
1041 kfree(string.pointer);
1042 return AE_CTRL_TERMINATE;
1043}
1044
/* Scan the whole ACPI namespace for this BGX's node and pull MAC
 * addresses from its children.  Always returns 0; matching and MAC
 * lookup are best-effort inside the walk callbacks.
 */
static int bgx_init_acpi_phy(struct bgx *bgx)
{
	acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL);
	return 0;
}
1050
1051#else
1052
/* ACPI not compiled in: report -ENODEV so bgx_init_phy() semantics
 * stay consistent (callers see a failed ACPI probe).
 */
static int bgx_init_acpi_phy(struct bgx *bgx)
{
	return -ENODEV;
}
1057
1058#endif /* CONFIG_ACPI */
1059
Robert Richterde387e12015-08-10 17:58:36 -07001060#if IS_ENABLED(CONFIG_OF_MDIO)
1061
1062static int bgx_init_of_phy(struct bgx *bgx)
Sunil Goutham4863dea2015-05-26 19:20:15 -07001063{
David Daneyeee326f2016-02-11 21:50:24 +05301064 struct fwnode_handle *fwn;
David Daneyb7d3e3d2016-03-14 17:30:39 -07001065 struct device_node *node = NULL;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001066 u8 lmac = 0;
Robert Richterde387e12015-08-10 17:58:36 -07001067
David Daneyeee326f2016-02-11 21:50:24 +05301068 device_for_each_child_node(&bgx->pdev->dev, fwn) {
David Daney5fc7cf12016-03-11 09:53:09 -08001069 struct phy_device *pd;
David Daneyeee326f2016-02-11 21:50:24 +05301070 struct device_node *phy_np;
David Daneyb7d3e3d2016-03-14 17:30:39 -07001071 const char *mac;
Sunil Goutham4863dea2015-05-26 19:20:15 -07001072
David Daney5fc7cf12016-03-11 09:53:09 -08001073 /* Should always be an OF node. But if it is not, we
1074 * cannot handle it, so exit the loop.
David Daneyeee326f2016-02-11 21:50:24 +05301075 */
David Daneyb7d3e3d2016-03-14 17:30:39 -07001076 node = to_of_node(fwn);
David Daneyeee326f2016-02-11 21:50:24 +05301077 if (!node)
1078 break;
1079
David Daneyeee326f2016-02-11 21:50:24 +05301080 mac = of_get_mac_address(node);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001081 if (mac)
1082 ether_addr_copy(bgx->lmac[lmac].mac, mac);
1083
1084 SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
1085 bgx->lmac[lmac].lmacid = lmac;
David Daney5fc7cf12016-03-11 09:53:09 -08001086
1087 phy_np = of_parse_phandle(node, "phy-handle", 0);
1088 /* If there is no phy or defective firmware presents
1089 * this cortina phy, for which there is no driver
1090 * support, ignore it.
1091 */
1092 if (phy_np &&
1093 !of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
1094 /* Wait until the phy drivers are available */
1095 pd = of_phy_find_device(phy_np);
1096 if (!pd)
David Daneyb7d3e3d2016-03-14 17:30:39 -07001097 goto defer;
David Daney5fc7cf12016-03-11 09:53:09 -08001098 bgx->lmac[lmac].phydev = pd;
1099 }
1100
Sunil Goutham4863dea2015-05-26 19:20:15 -07001101 lmac++;
David Daney65c66af2016-04-08 13:37:27 -07001102 if (lmac == MAX_LMAC_PER_BGX) {
1103 of_node_put(node);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001104 break;
David Daney65c66af2016-04-08 13:37:27 -07001105 }
Sunil Goutham4863dea2015-05-26 19:20:15 -07001106 }
Robert Richterde387e12015-08-10 17:58:36 -07001107 return 0;
David Daneyb7d3e3d2016-03-14 17:30:39 -07001108
1109defer:
1110 /* We are bailing out, try not to leak device reference counts
1111 * for phy devices we may have already found.
1112 */
1113 while (lmac) {
1114 if (bgx->lmac[lmac].phydev) {
1115 put_device(&bgx->lmac[lmac].phydev->mdio.dev);
1116 bgx->lmac[lmac].phydev = NULL;
1117 }
1118 lmac--;
1119 }
1120 of_node_put(node);
1121 return -EPROBE_DEFER;
Robert Richterde387e12015-08-10 17:58:36 -07001122}
1123
1124#else
1125
/* OF/MDIO support not compiled in: device-tree probing unavailable */
static int bgx_init_of_phy(struct bgx *bgx)
{
	return -ENODEV;
}
1130
1131#endif /* CONFIG_OF_MDIO */
1132
1133static int bgx_init_phy(struct bgx *bgx)
1134{
David Daney46b903a2015-08-10 17:58:37 -07001135 if (!acpi_disabled)
1136 return bgx_init_acpi_phy(bgx);
1137
Robert Richterde387e12015-08-10 17:58:36 -07001138 return bgx_init_of_phy(bgx);
Sunil Goutham4863dea2015-05-26 19:20:15 -07001139}
1140
/* PCI probe: map the BGX CSR BAR, discover the firmware-programmed
 * QLM/LMAC configuration, attach PHYs and bring up every LMAC.
 *
 * Returns 0 on success or a negative errno; all acquired resources
 * are unwound via the goto ladder on failure.
 */
static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err;
	struct device *dev = &pdev->dev;
	struct bgx *bgx = NULL;
	u8 lmac;
	u16 sdevid;

	bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
	if (!bgx)
		return -ENOMEM;
	bgx->pdev = pdev;

	pci_set_drvdata(pdev, bgx);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* 81xx parts split the BGX across DLMs; detected via subsystem ID */
	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
	if (sdevid == PCI_SUBSYS_DEVID_81XX_BGX)
		bgx->is_81xx = true;

	/* MAP configuration registers */
	bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!bgx->reg_base) {
		dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}
	/* BGX index is encoded in the BAR address; make it node-unique.
	 * NOTE(review): bit 24 yields only 0/1 per node — assumes the
	 * CN88xx BAR layout; confirm this still holds for 81xx parts.
	 */
	bgx->bgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
	bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_CN88XX;

	bgx_vnic[bgx->bgx_id] = bgx;
	bgx_get_qlm_mode(bgx);

	err = bgx_init_phy(bgx);
	if (err)
		goto err_enable;

	bgx_init_hw(bgx);

	/* Enable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
		err = bgx_lmac_enable(bgx, lmac);
		if (err) {
			dev_err(dev, "BGX%d failed to enable lmac%d\n",
				bgx->bgx_id, lmac);
			/* roll back the LMACs already enabled */
			while (lmac)
				bgx_lmac_disable(bgx, --lmac);
			goto err_enable;
		}
	}

	return 0;

err_enable:
	bgx_vnic[bgx->bgx_id] = NULL;
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
1215
1216static void bgx_remove(struct pci_dev *pdev)
1217{
1218 struct bgx *bgx = pci_get_drvdata(pdev);
1219 u8 lmac;
1220
1221 /* Disable all LMACs */
1222 for (lmac = 0; lmac < bgx->lmac_count; lmac++)
1223 bgx_lmac_disable(bgx, lmac);
1224
1225 bgx_vnic[bgx->bgx_id] = NULL;
1226 pci_release_regions(pdev);
1227 pci_disable_device(pdev);
1228 pci_set_drvdata(pdev, NULL);
1229}
1230
/* PCI driver glue binding bgx_probe/bgx_remove to the BGX device IDs */
static struct pci_driver bgx_driver = {
	.name = DRV_NAME,
	.id_table = bgx_id_table,
	.probe = bgx_probe,
	.remove = bgx_remove,
};
1237
/* Module entry point: announce the driver and register it with PCI */
static int __init bgx_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&bgx_driver);
}
1244
/* Module exit point: unregister the PCI driver (triggers bgx_remove) */
static void __exit bgx_cleanup_module(void)
{
	pci_unregister_driver(&bgx_driver);
}
1249
/* Hook init/exit into the module loader */
module_init(bgx_init_module);
module_exit(bgx_cleanup_module);