/*
 * This file is part of the Chelsio T3 Ethernet driver.
 *
 * Copyright (C) 2005-2006 Chelsio Communications. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
 * release for licensing terms and conditions.
 */

#include "common.h"
#include "regs.h"

/*
 * # of exact address filters. The first one is used for the station address,
 * the rest are available for multicast addresses.
 */
#define EXACT_ADDR_FILTERS 8
21static inline int macidx(const struct cmac *mac)
22{
23 return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR);
24}
25
/*
 * Reset the XAUI SERDES lanes of a MAC.
 *
 * First writes the VPD-provided XAUI configuration with every reset,
 * power-down, and PLL-reset bit asserted, then releases those bits in
 * stages (power-downs, PLL resets, lane resets), pausing 15us between
 * each stage.
 */
static void xaui_serdes_reset(struct cmac *mac)
{
	/* Bit groups to clear, in release order: lanes 0/1 then lanes 2/3. */
	static const unsigned int clear[] = {
		F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1,
		F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3
	};

	int i;
	struct adapter *adap = mac->adapter;
	u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset;

	/* Assert everything at once on top of the per-MAC VPD config. */
	t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] |
		     F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 |
		     F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 |
		     F_RESETPLL23 | F_RESETPLL01);
	t3_read_reg(adap, ctrl);	/* flush the write */
	udelay(15);

	/* Release the bits one group at a time. */
	for (i = 0; i < ARRAY_SIZE(clear); i++) {
		t3_set_reg_field(adap, ctrl, clear[i], 0);
		udelay(15);
	}
}
49
/*
 * Pulse the PCS reset of a T3B MAC: drive F_PCS_RESET_ low, hold for
 * 20us, then release it.
 */
void t3b_pcs_reset(struct cmac *mac)
{
	t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
			 F_PCS_RESET_, 0);
	udelay(20);
	t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0,
			 F_PCS_RESET_);
}
58
/*
 * Reset a MAC and bring it to a known-good default state.
 *
 * Resets the MAC core, programs default TX/RX control and RX filter
 * settings, clears the statistics block, resets the SERDES on XAUI
 * ports, and finally releases the appropriate reset bits for the port
 * type.  Returns 0 on success, -1 if the XAUI SERDES CMU fails to lock
 * (rev 0 parts only).
 */
int t3_mac_reset(struct cmac *mac)
{
	/* Default register values applied after the core reset: TX/RX
	 * disabled, RX filtering configured, hash and exact-match filters
	 * cleared, statistics cleared. */
	static const struct addr_val_pair mac_reset_avp[] = {
		{A_XGM_TX_CTRL, 0},
		{A_XGM_RX_CTRL, 0},
		{A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
		 F_RMFCS | F_ENJUMBO | F_ENHASHMCAST},
		{A_XGM_RX_HASH_LOW, 0},
		{A_XGM_RX_HASH_HIGH, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_1, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_2, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_3, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_4, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_5, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_6, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_7, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_8, 0},
		{A_XGM_STAT_CTRL, F_CLRSTATS}
	};
	u32 val;
	struct adapter *adap = mac->adapter;
	unsigned int oft = mac->offset;

	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */

	t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
	/* Store-and-forward RX only on non-XAUI ports; error frames are
	 * passed through in either case (F_DISERRFRAMES cleared). */
	t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
			 F_RXSTRFRWRD | F_DISERRFRAMES,
			 uses_xaui(adap) ? 0 : F_RXSTRFRWRD);

	if (uses_xaui(adap)) {
		if (adap->params.rev == 0) {
			/* Rev 0: enable the SERDES, wait for CMU lock, then
			 * release the SERDES reset by hand. */
			t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
					 F_RXENABLE | F_TXENABLE);
			if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
					    F_CMULOCK, 1, 5, 2)) {
				CH_ERR(adap,
				       "MAC %d XAUI SERDES CMU lock failed\n",
				       macidx(mac));
				return -1;
			}
			t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
					 F_SERDESRESET_);
		} else
			xaui_serdes_reset(mac);
	}

	if (adap->params.rev > 0)
		t3_write_reg(adap, A_XGM_PAUSE_TIMER + oft, 0xf000);

	/* Release the resets appropriate to the port type. */
	val = F_MAC_RESET_;
	if (is_10G(adap))
		val |= F_PCS_RESET_;
	else if (uses_xaui(adap))
		val |= F_PCS_RESET_ | F_XG2G_RESET_;
	else
		val |= F_RGMII_RESET_ | F_XG2G_RESET_;
	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */
	if ((val & F_PCS_RESET_) && adap->params.rev) {
		/* Later revs additionally need an explicit PCS reset pulse. */
		msleep(1);
		t3b_pcs_reset(mac);
	}

	memset(&mac->stats, 0, sizeof(mac->stats));
	return 0;
}
127
128/*
129 * Set the exact match register 'idx' to recognize the given Ethernet address.
130 */
131static void set_addr_filter(struct cmac *mac, int idx, const u8 * addr)
132{
133 u32 addr_lo, addr_hi;
134 unsigned int oft = mac->offset + idx * 8;
135
136 addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
137 addr_hi = (addr[5] << 8) | addr[4];
138
139 t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo);
140 t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi);
141}
142
143/* Set one of the station's unicast MAC addresses. */
144int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
145{
146 if (idx >= mac->nucast)
147 return -EINVAL;
148 set_addr_filter(mac, idx, addr);
149 return 0;
150}
151
152/*
153 * Specify the number of exact address filters that should be reserved for
154 * unicast addresses. Caller should reload the unicast and multicast addresses
155 * after calling this.
156 */
157int t3_mac_set_num_ucast(struct cmac *mac, int n)
158{
159 if (n > EXACT_ADDR_FILTERS)
160 return -EINVAL;
161 mac->nucast = n;
162 return 0;
163}
164
165/* Calculate the RX hash filter index of an Ethernet address */
166static int hash_hw_addr(const u8 * addr)
167{
168 int hash = 0, octet, bit, i = 0, c;
169
170 for (octet = 0; octet < 6; ++octet)
171 for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) {
172 hash ^= (c & 1) << i;
173 if (++i == 6)
174 i = 0;
175 }
176 return hash;
177}
178
179int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm)
180{
181 u32 val, hash_lo, hash_hi;
182 struct adapter *adap = mac->adapter;
183 unsigned int oft = mac->offset;
184
185 val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES;
186 if (rm->dev->flags & IFF_PROMISC)
187 val |= F_COPYALLFRAMES;
188 t3_write_reg(adap, A_XGM_RX_CFG + oft, val);
189
190 if (rm->dev->flags & IFF_ALLMULTI)
191 hash_lo = hash_hi = 0xffffffff;
192 else {
193 u8 *addr;
194 int exact_addr_idx = mac->nucast;
195
196 hash_lo = hash_hi = 0;
197 while ((addr = t3_get_next_mcaddr(rm)))
198 if (exact_addr_idx < EXACT_ADDR_FILTERS)
199 set_addr_filter(mac, exact_addr_idx++, addr);
200 else {
201 int hash = hash_hw_addr(addr);
202
203 if (hash < 32)
204 hash_lo |= (1 << hash);
205 else
206 hash_hi |= (1 << (hash - 32));
207 }
208 }
209
210 t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo);
211 t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi);
212 return 0;
213}
214
/*
 * Set the MAC's RX maximum packet size for the given MTU and adjust the
 * PAUSE-frame FIFO watermarks and TX FIFO threshold to match.
 * Returns 0 on success, -EINVAL if the frame size exceeds the HW maximum.
 */
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
{
	int hwm, lwm;
	unsigned int thres, v;
	struct adapter *adap = mac->adapter;

	/*
	 * MAX_FRAME_SIZE includes header + FCS, mtu doesn't. The HW max
	 * packet size register includes header, but not FCS.
	 */
	mtu += 14;			/* add Ethernet header length */
	if (mtu > MAX_FRAME_SIZE - 4)	/* -4 leaves room for the FCS */
		return -EINVAL;
	t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);

	/*
	 * Adjust the PAUSE frame watermarks. We always set the LWM, and the
	 * HWM only if flow-control is enabled.
	 */
	hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, MAC_RXFIFO_SIZE / 2U);
	hwm = min(hwm, 3 * MAC_RXFIFO_SIZE / 4 + 1024);
	lwm = hwm - 1024;	/* LWM trails the HWM by 1KB */
	v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
	v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
	v |= V_RXFIFOPAUSELWM(lwm / 8);
	/* Rewrite the HWM only when one is already set (flow control on). */
	if (G_RXFIFOPAUSEHWM(v))
		v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
		    V_RXFIFOPAUSEHWM(hwm / 8);
	t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);

	/* Adjust the TX FIFO threshold based on the MTU */
	thres = (adap->params.vpd.cclk * 1000) / 15625;
	thres = (thres * mtu) / 1000;
	if (is_10G(adap))
		thres /= 10;
	thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
	thres = max(thres, 8U);	/* need at least 8 */
	t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
			 V_TXFIFOTHRESH(M_TXFIFOTHRESH), V_TXFIFOTHRESH(thres));
	return 0;
}
256
257int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
258{
259 u32 val;
260 struct adapter *adap = mac->adapter;
261 unsigned int oft = mac->offset;
262
263 if (duplex >= 0 && duplex != DUPLEX_FULL)
264 return -EINVAL;
265 if (speed >= 0) {
266 if (speed == SPEED_10)
267 val = V_PORTSPEED(0);
268 else if (speed == SPEED_100)
269 val = V_PORTSPEED(1);
270 else if (speed == SPEED_1000)
271 val = V_PORTSPEED(2);
272 else if (speed == SPEED_10000)
273 val = V_PORTSPEED(3);
274 else
275 return -EINVAL;
276
277 t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
278 V_PORTSPEED(M_PORTSPEED), val);
279 }
280
281 val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
282 val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
283 if (fc & PAUSE_TX)
284 val |= V_RXFIFOPAUSEHWM(G_RXFIFOPAUSELWM(val) + 128); /* +1KB */
285 t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
286
287 t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
288 (fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
289 return 0;
290}
291
292int t3_mac_enable(struct cmac *mac, int which)
293{
294 int idx = macidx(mac);
295 struct adapter *adap = mac->adapter;
296 unsigned int oft = mac->offset;
297
298 if (which & MAC_DIRECTION_TX) {
299 t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
300 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
301 t3_write_reg(adap, A_TP_PIO_DATA, 0xbf000001);
302 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
303 t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);
304 }
305 if (which & MAC_DIRECTION_RX)
306 t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
307 return 0;
308}
309
310int t3_mac_disable(struct cmac *mac, int which)
311{
312 int idx = macidx(mac);
313 struct adapter *adap = mac->adapter;
314
315 if (which & MAC_DIRECTION_TX) {
316 t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
317 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
318 t3_write_reg(adap, A_TP_PIO_DATA, 0xc000001f);
319 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
320 t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 0);
321 }
322 if (which & MAC_DIRECTION_RX)
323 t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
324 return 0;
325}
326
327/*
328 * This function is called periodically to accumulate the current values of the
329 * RMON counters into the port statistics. Since the packet counters are only
330 * 32 bits they can overflow in ~286 secs at 10G, so the function should be
331 * called more frequently than that. The byte counters are 45-bit wide, they
332 * would overflow in ~7.8 hours.
333 */
334const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
335{
336#define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset)
337#define RMON_UPDATE(mac, name, reg) \
338 (mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg)
339#define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \
340 (mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \
341 ((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32)
342
343 u32 v, lo;
344
345 RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH);
346 RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH);
347 RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES);
348 RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES);
349 RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES);
350 RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES);
351 RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES);
352 RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES);
353 RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES);
354
355 RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);
356 mac->stats.rx_too_long += RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
357
358 RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
359 RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);
360 RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES);
361 RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES);
362 RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES);
363 RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES);
364 RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES);
365
366 RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH);
367 RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH);
368 RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST);
369 RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST);
370 RMON_UPDATE(mac, tx_pause, TX_PAUSE);
371 /* This counts error frames in general (bad FCS, underrun, etc). */
372 RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES);
373
374 RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES);
375 RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES);
376 RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES);
377 RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES);
378 RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES);
379 RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES);
380 RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES);
381
382 /* The next stat isn't clear-on-read. */
383 t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50);
384 v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA);
385 lo = (u32) mac->stats.rx_cong_drops;
386 mac->stats.rx_cong_drops += (u64) (v - lo);
387
388 return &mac->stats;
389}