/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "nic_reg.h"
#include "nic.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-BGX"
#define DRV_VERSION	"1.0"

struct lmac {
	struct bgx		*bgx;
	int			dmac;
	u8			mac[ETH_ALEN];
	bool			link_up;
	int			lmacid; /* ID within BGX */
	int			lmacid_bd; /* ID on board */
	struct net_device	netdev;
	struct phy_device	*phydev;
	unsigned int		last_duplex;
	unsigned int		last_link;
	unsigned int		last_speed;
	bool			is_sgmii;
	struct delayed_work	dwork;
	struct workqueue_struct *check_link;
};
Sunil Goutham4863dea2015-05-26 19:20:15 -070043
44struct bgx {
45 u8 bgx_id;
46 u8 qlm_mode;
47 struct lmac lmac[MAX_LMAC_PER_BGX];
48 int lmac_count;
49 int lmac_type;
50 int lane_to_sds;
51 int use_training;
52 void __iomem *reg_base;
53 struct pci_dev *pdev;
Aleksey Makarov0c886a12015-06-02 11:00:22 -070054};
Sunil Goutham4863dea2015-05-26 19:20:15 -070055
static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
static int lmac_count; /* Total number of LMACs in system */

static int bgx_xaui_check_link(struct lmac *lmac);

/* Supported devices */
static const struct pci_device_id bgx_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Cavium Inc");
MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, bgx_id_table);

/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation. All accesses to the
 * device registers on this platform are implicitly strongly ordered with
 * respect to memory accesses. So writeq_relaxed() and readq_relaxed() are
 * safe to use with no memory barriers in this driver. The readq()/writeq()
 * functions add explicit ordering operations which in this case are
 * redundant, and only add overhead.
 */

/* Register read/write APIs */
static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	return readq_relaxed(addr);
}

static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	writeq_relaxed(val, addr);
}

static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	writeq_relaxed(val | readq_relaxed(addr), addr);
}
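
/* Note: each LMAC owns a 1MB (1 << 20) register window inside the BGX
 * BAR, which is where the (lmac << 20) term in the accessors above
 * comes from. bgx_reg_modify() is a read-modify-write that can only set
 * bits; clearing a bit takes an explicit read/write pair, as done
 * throughout this file.
 */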

static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
{
	int timeout = 100;
	u64 reg_val;

	while (timeout) {
		reg_val = bgx_reg_read(bgx, lmac, reg);
		if (zero && !(reg_val & mask))
			return 0;
		if (!zero && (reg_val & mask))
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	return 1;
}
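
/* bgx_poll_reg() returns 0 once the masked bits reach the wanted state
 * (cleared when 'zero' is true, set otherwise) and 1 after roughly
 * 100-200ms of polling, so callers treat any non-zero return as a
 * timeout, e.g.:
 *
 *	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true))
 *		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
 */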

/* Return a bitmap of the BGX blocks present in HW */
unsigned bgx_get_map(int node)
{
	int i;
	unsigned map = 0;

	for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
		if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i])
			map |= (1 << i);
	}

	return map;
}
EXPORT_SYMBOL(bgx_get_map);
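
/* A caller can test for one block with the usual bitmap idiom; e.g.
 * (bgx_get_map(node) & (1 << 1)) is non-zero iff BGX1 on that node has
 * probed (hypothetical usage, not taken from an existing caller).
 */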

/* Return number of LMACs configured for this BGX */
int bgx_get_lmac_count(int node, int bgx_idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx)
		return bgx->lmac_count;

	return 0;
}
EXPORT_SYMBOL(bgx_get_lmac_count);

/* Returns the current link status of LMAC */
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
{
	struct bgx_link_status *link = (struct bgx_link_status *)status;
	struct bgx *bgx;
	struct lmac *lmac;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (!bgx)
		return;

	lmac = &bgx->lmac[lmacid];
	link->link_up = lmac->link_up;
	link->duplex = lmac->last_duplex;
	link->speed = lmac->last_speed;
}
EXPORT_SYMBOL(bgx_get_lmac_link_state);

const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	if (bgx)
		return bgx->lmac[lmacid].mac;

	return NULL;
}
EXPORT_SYMBOL(bgx_get_lmac_mac);

void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	if (!bgx)
		return;

	ether_addr_copy(bgx->lmac[lmacid].mac, mac);
}
EXPORT_SYMBOL(bgx_set_lmac_mac);

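/* Reprogram the GMI port to match the speed/duplex last reported by the
 * PHY. The LMAC is disabled (CMR_EN cleared) for the duration of the
 * update so the data path never runs on a half-written configuration.
 */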
static void bgx_sgmii_change_link_state(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	u64 cmr_cfg;
	u64 port_cfg = 0;
	u64 misc_ctl = 0;

	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
	cmr_cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);

	if (lmac->link_up) {
		misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
		port_cfg &= ~GMI_PORT_CFG_DUPLEX;
		port_cfg |= (lmac->last_duplex << 2);
	} else {
		misc_ctl |= PCS_MISC_CTL_GMX_ENO;
	}

	switch (lmac->last_speed) {
	case 10:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 50; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 100:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 5; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 1000:
		port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 1; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
		if (lmac->last_duplex)
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 0);
		else
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 8192);
		break;
	default:
		break;
	}
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);

	/* re-enable lmac */
	cmr_cfg |= CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
}

static void bgx_lmac_handler(struct net_device *netdev)
{
	struct lmac *lmac = container_of(netdev, struct lmac, netdev);
	struct phy_device *phydev;
	int link_changed = 0;

	if (!lmac)
		return;

	phydev = lmac->phydev;

	if (!phydev->link && lmac->last_link)
		link_changed = -1;

	if (phydev->link &&
	    (lmac->last_duplex != phydev->duplex ||
	     lmac->last_link != phydev->link ||
	     lmac->last_speed != phydev->speed)) {
		link_changed = 1;
	}

	lmac->last_link = phydev->link;
	lmac->last_speed = phydev->speed;
	lmac->last_duplex = phydev->duplex;

	if (!link_changed)
		return;

	if (link_changed > 0)
		lmac->link_up = true;
	else
		lmac->link_up = false;

	if (lmac->is_sgmii)
		bgx_sgmii_change_link_state(lmac);
	else
		bgx_xaui_check_link(lmac);
}

u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (!bgx)
		return 0;

	if (idx > 8)
		lmac = 0;
	return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8));
}
EXPORT_SYMBOL(bgx_get_rx_stats);

u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (!bgx)
		return 0;

	return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8));
}
EXPORT_SYMBOL(bgx_get_tx_stats);

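/* Clear this LMAC's entries in the RX DMAC filter CAM. The CAM is laid
 * out as MAX_DMAC_PER_LMAC consecutive 8-byte entries per LMAC, which
 * is exactly the offset arithmetic below.
 */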
static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
{
	u64 offset;

	while (bgx->lmac[lmac].dmac > 0) {
		offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) +
			(lmac * MAX_DMAC_PER_LMAC * sizeof(u64));
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
		bgx->lmac[lmac].dmac--;
	}
}

static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
{
	u64 cfg;

	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

	/* Disable frame alignment if using preamble */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
	if (cfg & 1)
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* PCS reset */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
			 PCS_MRX_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");
		return -1;
	}

	/* Take PCS out of power-down, then reset and enable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
	cfg &= ~PCS_MRX_CTL_PWR_DN;
	cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);

	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
			 PCS_MRX_STATUS_AN_CPT, false)) {
		dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
		return -1;
	}

	return 0;
}

static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
{
	u64 cfg;

	/* Reset SPU */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
		return -1;
	}

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	/* RXAUI additionally needs interleaved running disparity */
	if (bgx->lmac_type != BGX_MODE_RXAUI)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	else
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
			       SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);

	/* clear all interrupts */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

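	/* For the BASE-R modes (10G_KR/40G_KR) the link partners
	 * negotiate equalizer settings through PMD link training; zero
	 * the coefficient update/report registers first so no stale
	 * state is exchanged (see IEEE 802.3 clause 72 for background).
	 */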
	if (bgx->use_training) {
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
		/* training enable */
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
	}

	/* Append FCS to each packet */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);

	/* Disable forward error correction */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
	cfg &= ~SPU_FEC_CTL_FEC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);

	/* Disable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
	if (bgx->lmac_type == BGX_MODE_10G_KR)
		cfg |= (1 << 23);
	else if (bgx->lmac_type == BGX_MODE_40G_KR)
		cfg |= (1 << 24);
	else
		cfg &= ~((1 << 23) | (1 << 24));
	cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12)));
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);

	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
	cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
	cfg &= ~SPU_CTL_LOW_POWER;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
	cfg &= ~SMU_TX_CTL_UNI_EN;
	cfg |= SMU_TX_CTL_DIC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);

	/* take lmac_count into account */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);

	return 0;
}

static int bgx_xaui_check_link(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	int lmacid = lmac->lmacid;
	int lmac_type = bgx->lmac_type;
	u64 cfg;

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
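	/* If link training has not completed (SPUX_INT bit 13 still
	 * clear), write-1-to-clear the two training interrupt bits and
	 * set bit 0 of SPUX_BR_PMD_CRTL (presumably a training restart),
	 * then bail out so a later poll can re-check the link.
	 */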
	if (bgx->use_training) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
		if (!(cfg & (1ull << 13))) {
			cfg = (1ull << 13) | (1ull << 14);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
			cfg |= (1ull << 0);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
			return -1;
		}
	}

	/* wait for PCS to come out of reset */
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
		return -1;
	}

	if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
	    (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
				 SPU_BR_STATUS_BLK_LOCK, false)) {
			dev_err(&bgx->pdev->dev,
				"SPU_BR_STATUS_BLK_LOCK not completed\n");
			return -1;
		}
	} else {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
				 SPU_BX_STATUS_RX_ALIGN, false)) {
			dev_err(&bgx->pdev->dev,
				"SPU_BX_STATUS_RX_ALIGN not completed\n");
			return -1;
		}
	}

	/* Clear rcvflt bit (latching high) and read it back */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
		if (bgx->use_training) {
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
			if (!(cfg & (1ull << 13))) {
				cfg = (1ull << 13) | (1ull << 14);
				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
				cfg = bgx_reg_read(bgx, lmacid,
						   BGX_SPUX_BR_PMD_CRTL);
				cfg |= (1ull << 0);
				bgx_reg_write(bgx, lmacid,
					      BGX_SPUX_BR_PMD_CRTL, cfg);
				return -1;
			}
		}
		return -1;
	}

	/* Wait for MAC RX to be ready */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
			 SMU_RX_CTL_STATUS, true)) {
		dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
		return -1;
	}

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
		return -1;
	}

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "SMU TX not idle\n");
		return -1;
	}

	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		dev_err(&bgx->pdev->dev, "Receive fault\n");
		return -1;
	}

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
			 SPU_STATUS1_RCV_LNK, false)) {
		dev_err(&bgx->pdev->dev, "SPU receive link down\n");
		return -1;
	}

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
	cfg &= ~SPU_MISC_CTL_RX_DIS;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
	return 0;
}

static void bgx_poll_for_link(struct work_struct *work)
{
	struct lmac *lmac;
	u64 link;

	lmac = container_of(work, struct lmac, dwork.work);

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(lmac->bgx, lmac->lmacid,
		       BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
		     SPU_STATUS1_RCV_LNK, false);

	link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
	if (link & SPU_STATUS1_RCV_LNK) {
		lmac->link_up = 1;
		if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
			lmac->last_speed = 40000;
		else
			lmac->last_speed = 10000;
		lmac->last_duplex = 1;
	} else {
		lmac->link_up = 0;
	}

	if (lmac->last_link != lmac->link_up) {
		lmac->last_link = lmac->link_up;
		if (lmac->link_up)
			bgx_xaui_check_link(lmac);
	}

	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
}

static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
{
	struct lmac *lmac;
	u64 cfg;

	lmac = &bgx->lmac[lmacid];
	lmac->bgx = bgx;

	if (bgx->lmac_type == BGX_MODE_SGMII) {
		lmac->is_sgmii = 1;
		if (bgx_lmac_sgmii_init(bgx, lmacid))
			return -1;
	} else {
		lmac->is_sgmii = 0;
		if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type))
			return -1;
	}

	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
	} else {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
	}

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
		       CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);

	/* Restore default cfg, in case low level firmware changed it */
	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);

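	/* SGMII/XAUI/RXAUI modes have an attached PHY that reports link
	 * changes via bgx_lmac_handler(); the serial 10G/40G modes have
	 * none, so their link state is polled instead from a per-LMAC
	 * workqueue (bgx_poll_for_link).
	 */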
	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR)) {
		if (!lmac->phydev)
			return -ENODEV;

		lmac->phydev->dev_flags = 0;

		if (phy_connect_direct(&lmac->netdev, lmac->phydev,
				       bgx_lmac_handler,
				       PHY_INTERFACE_MODE_SGMII))
			return -ENODEV;

		phy_start_aneg(lmac->phydev);
	} else {
		lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
						   WQ_MEM_RECLAIM, 1);
		if (!lmac->check_link)
			return -ENOMEM;
		INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
		queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
	}

	return 0;
}

static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
{
	struct lmac *lmac;
	u64 cmrx_cfg;

	lmac = &bgx->lmac[lmacid];
	if (lmac->check_link) {
		/* Destroy work queue */
		cancel_delayed_work(&lmac->dwork);
		flush_workqueue(lmac->check_link);
		destroy_workqueue(lmac->check_link);
	}

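	/* Clear bit 15 of CMR_CFG (CMR_EN in thunder_bgx.h) to stop the
	 * LMAC before flushing its DMAC filters.
	 */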
	cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cmrx_cfg &= ~(1 << 15);
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
	bgx_flush_dmac_addrs(bgx, lmacid);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
		phy_disconnect(lmac->phydev);

	lmac->phydev = NULL;
}

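/* lane_to_sds encodes the LMAC-lane to SerDes-lane map, two bits per
 * lane: 0xE4 (binary 11 10 01 00) appears to be the identity mapping,
 * while 0 makes each LMAC use the SerDes lane matching its own index
 * (note the "+ i" in bgx_init_hw() below).
 */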
static void bgx_set_num_ports(struct bgx *bgx)
{
	u64 lmac_count;

	switch (bgx->qlm_mode) {
	case QLM_MODE_SGMII:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_SGMII;
		bgx->lane_to_sds = 0;
		break;
	case QLM_MODE_XAUI_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_XAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_RXAUI_2X2:
		bgx->lmac_count = 2;
		bgx->lmac_type = BGX_MODE_RXAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_XFI_4X1:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_XFI;
		bgx->lane_to_sds = 0;
		break;
	case QLM_MODE_XLAUI_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_XLAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_10G_KR_4X1:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_10G_KR;
		bgx->lane_to_sds = 0;
		bgx->use_training = 1;
		break;
	case QLM_MODE_40G_KR4_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_40G_KR;
		bgx->lane_to_sds = 0xE4;
		bgx->use_training = 1;
		break;
	default:
		bgx->lmac_count = 0;
		break;
	}

	/* Check if low level firmware has programmed LMAC count
	 * based on board type; if so, use that value, otherwise
	 * keep the default static values set above.
	 */
	lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
	if (lmac_count != 4)
		bgx->lmac_count = lmac_count;
}

static void bgx_init_hw(struct bgx *bgx)
{
	int i;

	bgx_set_num_ports(bgx);

	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
		dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id);

	/* Set lmac type and lane2serdes mapping */
	for (i = 0; i < bgx->lmac_count; i++) {
		if (bgx->lmac_type == BGX_MODE_RXAUI) {
			if (i)
				bgx->lane_to_sds = 0x0e;
			else
				bgx->lane_to_sds = 0x04;
			bgx_reg_write(bgx, i, BGX_CMRX_CFG,
				      (bgx->lmac_type << 8) | bgx->lane_to_sds);
			continue;
		}
		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
			      (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
		bgx->lmac[i].lmacid_bd = lmac_count;
		lmac_count++;
	}

	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);

	/* Set the backpressure AND mask */
	for (i = 0; i < bgx->lmac_count; i++)
		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
			       ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
			       (i * MAX_BGX_CHANS_PER_LMAC));

	/* Disable all MAC filtering */
	for (i = 0; i < RX_DMAC_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

	/* Disable MAC steering (NCSI traffic) */
	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
}

static void bgx_get_qlm_mode(struct bgx *bgx)
{
	struct device *dev = &bgx->pdev->dev;
	int lmac_type;
	int train_en;

	/* Read LMAC0 type to figure out QLM mode
	 * This is configured by low level firmware
	 */
	lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
	lmac_type = (lmac_type >> 8) & 0x07;

	train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
			SPU_PMD_CRTL_TRAIN_EN;

	switch (lmac_type) {
	case BGX_MODE_SGMII:
		bgx->qlm_mode = QLM_MODE_SGMII;
		dev_info(dev, "BGX%d QLM mode: SGMII\n", bgx->bgx_id);
		break;
	case BGX_MODE_XAUI:
		bgx->qlm_mode = QLM_MODE_XAUI_1X4;
		dev_info(dev, "BGX%d QLM mode: XAUI\n", bgx->bgx_id);
		break;
	case BGX_MODE_RXAUI:
		bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
		dev_info(dev, "BGX%d QLM mode: RXAUI\n", bgx->bgx_id);
		break;
	case BGX_MODE_XFI:
		if (!train_en) {
			bgx->qlm_mode = QLM_MODE_XFI_4X1;
			dev_info(dev, "BGX%d QLM mode: XFI\n", bgx->bgx_id);
		} else {
			bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
			dev_info(dev, "BGX%d QLM mode: 10G_KR\n", bgx->bgx_id);
		}
		break;
	case BGX_MODE_XLAUI:
		if (!train_en) {
			bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
			dev_info(dev, "BGX%d QLM mode: XLAUI\n", bgx->bgx_id);
		} else {
			bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
			dev_info(dev, "BGX%d QLM mode: 40G_KR4\n", bgx->bgx_id);
		}
		break;
	default:
		bgx->qlm_mode = QLM_MODE_SGMII;
		dev_info(dev, "BGX%d QLM default mode: SGMII\n", bgx->bgx_id);
	}
}

#ifdef CONFIG_ACPI

static int acpi_get_mac_address(struct acpi_device *adev, u8 *dst)
{
	u8 mac[ETH_ALEN];
	int ret;

	ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
					    "mac-address", mac, ETH_ALEN);
	if (ret)
		goto out;

	if (!is_valid_ether_addr(mac)) {
		ret = -EINVAL;
		goto out;
	}

	memcpy(dst, mac, ETH_ALEN);
out:
	return ret;
}

/* Currently only sets the MAC address. */
static acpi_status bgx_acpi_register_phy(acpi_handle handle,
					 u32 lvl, void *context, void **rv)
{
	struct bgx *bgx = context;
	struct acpi_device *adev;

	if (acpi_bus_get_device(handle, &adev))
		goto out;

	acpi_get_mac_address(adev, bgx->lmac[bgx->lmac_count].mac);

	SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, &bgx->pdev->dev);

	bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
out:
	bgx->lmac_count++;
	return AE_OK;
}

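/* Walk the ACPI namespace for the device whose single-segment name
 * matches "BGX<n>" for this block, then visit its children with
 * bgx_acpi_register_phy() to pick up per-LMAC MAC addresses.
 */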
static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
				     void *context, void **ret_val)
{
	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
	struct bgx *bgx = context;
	char bgx_sel[5];

	snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
	if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
		pr_warn("Invalid link device\n");
		return AE_OK;
	}

	if (strncmp(string.pointer, bgx_sel, 4))
		return AE_OK;

	acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
			    bgx_acpi_register_phy, NULL, bgx, NULL);

	kfree(string.pointer);
	return AE_CTRL_TERMINATE;
}

static int bgx_init_acpi_phy(struct bgx *bgx)
{
	acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL);
	return 0;
}

#else

static int bgx_init_acpi_phy(struct bgx *bgx)
{
	return -ENODEV;
}

#endif /* CONFIG_ACPI */

#if IS_ENABLED(CONFIG_OF_MDIO)

static int bgx_init_of_phy(struct bgx *bgx)
{
	struct device_node *np;
	struct device_node *np_child;
	u8 lmac = 0;
	char bgx_sel[5];
	const char *mac;

	/* Get BGX node from DT */
	snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id);
	np = of_find_node_by_name(NULL, bgx_sel);
	if (!np)
		return -ENODEV;

	for_each_child_of_node(np, np_child) {
		struct device_node *phy_np = of_parse_phandle(np_child,
							      "phy-handle", 0);
		if (!phy_np)
			continue;
		bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);

		mac = of_get_mac_address(np_child);
		if (mac)
			ether_addr_copy(bgx->lmac[lmac].mac, mac);

		SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
		bgx->lmac[lmac].lmacid = lmac;
		lmac++;
		if (lmac == MAX_LMAC_PER_BGX)
			break;
	}
	return 0;
}

#else

static int bgx_init_of_phy(struct bgx *bgx)
{
	return -ENODEV;
}

#endif /* CONFIG_OF_MDIO */

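/* On ACPI systems (acpi_disabled == 0) take the ACPI path; otherwise
 * fall back to device tree via CONFIG_OF_MDIO.
 */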
static int bgx_init_phy(struct bgx *bgx)
{
	if (!acpi_disabled)
		return bgx_init_acpi_phy(bgx);

	return bgx_init_of_phy(bgx);
}

static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err;
	struct device *dev = &pdev->dev;
	struct bgx *bgx = NULL;
	u8 lmac;

	bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
	if (!bgx)
		return -ENOMEM;
	bgx->pdev = pdev;

	pci_set_drvdata(pdev, bgx);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* MAP configuration registers */
	bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!bgx->reg_base) {
		dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}
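	/* The BGX block number sits in bit 24 of the BAR address; add
	 * the node's BGX base so bgx_id is unique system-wide.
	 */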
	bgx->bgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
	bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_CN88XX;

	bgx_vnic[bgx->bgx_id] = bgx;
	bgx_get_qlm_mode(bgx);

	err = bgx_init_phy(bgx);
	if (err)
		goto err_enable;

	bgx_init_hw(bgx);

	/* Enable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
		err = bgx_lmac_enable(bgx, lmac);
		if (err) {
			dev_err(dev, "BGX%d failed to enable lmac%d\n",
				bgx->bgx_id, lmac);
			goto err_enable;
		}
	}

	return 0;

err_enable:
	bgx_vnic[bgx->bgx_id] = NULL;
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void bgx_remove(struct pci_dev *pdev)
{
	struct bgx *bgx = pci_get_drvdata(pdev);
	u8 lmac;

	/* Disable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++)
		bgx_lmac_disable(bgx, lmac);

	bgx_vnic[bgx->bgx_id] = NULL;
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver bgx_driver = {
	.name = DRV_NAME,
	.id_table = bgx_id_table,
	.probe = bgx_probe,
	.remove = bgx_remove,
};

static int __init bgx_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&bgx_driver);
}

static void __exit bgx_cleanup_module(void)
{
	pci_unregister_driver(&bgx_driver);
}

module_init(bgx_init_module);
module_exit(bgx_cleanup_module);