/*
 * This file is part of the Chelsio T3 Ethernet driver.
 *
 * Copyright (C) 2005-2006 Chelsio Communications. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
 * release for licensing terms and conditions.
 */

#ifndef __CHELSIO_COMMON_H
#define __CHELSIO_COMMON_H

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include "version.h"

#define CH_ERR(adap, fmt, ...)   dev_err(&adap->pdev->dev, fmt, ## __VA_ARGS__)
#define CH_WARN(adap, fmt, ...)  dev_warn(&adap->pdev->dev, fmt, ## __VA_ARGS__)
#define CH_ALERT(adap, fmt, ...) \
        dev_printk(KERN_ALERT, &adap->pdev->dev, fmt, ## __VA_ARGS__)

/*
 * More powerful macro that selectively prints messages based on msg_enable.
 * For info and debugging messages.
 */
#define CH_MSG(adapter, level, category, fmt, ...) do { \
        if ((adapter)->msg_enable & NETIF_MSG_##category) \
                dev_printk(KERN_##level, &adapter->pdev->dev, fmt, \
                           ## __VA_ARGS__); \
} while (0)

#ifdef DEBUG
# define CH_DBG(adapter, category, fmt, ...) \
        CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
#else
# define CH_DBG(adapter, category, fmt, ...)
#endif
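
/*
 * Usage sketch (illustrative only; port_id, addr and val are placeholder
 * variables): the category argument is the suffix of a NETIF_MSG_* flag, so
 * a message is emitted only when that bit is set in adapter->msg_enable, e.g.
 *
 *	CH_MSG(adapter, INFO, LINK, "port %d link changed\n", port_id);
 *	CH_DBG(adapter, MMIO, "reg %#x read as %#x\n", addr, val);
 */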

/* Additional NETIF_MSG_* categories */
#define NETIF_MSG_MMIO 0x8000000

struct t3_rx_mode {
        struct net_device *dev;
        struct dev_mc_list *mclist;
        unsigned int idx;
};

static inline void init_rx_mode(struct t3_rx_mode *p, struct net_device *dev,
                                struct dev_mc_list *mclist)
{
        p->dev = dev;
        p->mclist = mclist;
        p->idx = 0;
}

static inline u8 *t3_get_next_mcaddr(struct t3_rx_mode *rm)
{
        u8 *addr = NULL;

        if (rm->mclist && rm->idx < rm->dev->mc_count) {
                addr = rm->mclist->dmi_addr;
                rm->mclist = rm->mclist->next;
                rm->idx++;
        }
        return addr;
}
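
/*
 * Usage sketch (illustrative; "mac" is assumed to be the port's cmac): the
 * Rx-mode path wraps a device's multicast list in a t3_rx_mode and hands it
 * to the MAC code, which then pulls addresses with t3_get_next_mcaddr():
 *
 *	struct t3_rx_mode rm;
 *
 *	init_rx_mode(&rm, dev, dev->mc_list);
 *	t3_mac_set_rx_mode(mac, &rm);
 */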

enum {
        MAX_NPORTS = 2,         /* max # of ports */
        MAX_FRAME_SIZE = 10240, /* max MAC frame size, including header + FCS */
        EEPROMSIZE = 8192,      /* Serial EEPROM size */
        RSS_TABLE_SIZE = 64,    /* size of RSS lookup and mapping tables */
        TCB_SIZE = 128,         /* TCB size */
        NMTUS = 16,             /* size of MTU table */
        NCCTRL_WIN = 32,        /* # of congestion control windows */
};

#define MAX_RX_COALESCING_LEN 16224U

enum {
        PAUSE_RX = 1 << 0,
        PAUSE_TX = 1 << 1,
        PAUSE_AUTONEG = 1 << 2
};

enum {
        SUPPORTED_OFFLOAD = 1 << 24,
        SUPPORTED_IRQ = 1 << 25
};

enum {                          /* adapter interrupt-maintained statistics */
        STAT_ULP_CH0_PBL_OOB,
        STAT_ULP_CH1_PBL_OOB,
        STAT_PCI_CORR_ECC,

        IRQ_NUM_STATS           /* keep last */
};

enum {
        SGE_QSETS = 8,          /* # of SGE Tx/Rx/RspQ sets */
        SGE_RXQ_PER_SET = 2,    /* # of Rx queues per set */
        SGE_TXQ_PER_SET = 3     /* # of Tx queues per set */
};

enum sge_context_type {         /* SGE egress context types */
        SGE_CNTXT_RDMA = 0,
        SGE_CNTXT_ETH = 2,
        SGE_CNTXT_OFLD = 4,
        SGE_CNTXT_CTRL = 5
};

enum {
        AN_PKT_SIZE = 32,       /* async notification packet size */
        IMMED_PKT_SIZE = 48     /* packet size for immediate data */
};

struct sg_ent {                 /* SGE scatter/gather entry */
        u32 len[2];
        u64 addr[2];
};

#ifndef SGE_NUM_GENBITS
/* Must be 1 or 2 */
# define SGE_NUM_GENBITS 2
#endif

#define TX_DESC_FLITS 16U
#define WR_FLITS (TX_DESC_FLITS + 1 - SGE_NUM_GENBITS)
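
/*
 * For example, with the default SGE_NUM_GENBITS of 2 this evaluates to
 * WR_FLITS = 16 + 1 - 2 = 15.
 */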

struct cphy;
struct adapter;

struct mdio_ops {
        int (*read)(struct adapter *adapter, int phy_addr, int mmd_addr,
                    int reg_addr, unsigned int *val);
        int (*write)(struct adapter *adapter, int phy_addr, int mmd_addr,
                     int reg_addr, unsigned int val);
};

struct adapter_info {
        unsigned char nports;           /* # of ports */
        unsigned char phy_base_addr;    /* MDIO PHY base address */
        unsigned char mdien;
        unsigned char mdiinv;
        unsigned int gpio_out;          /* GPIO output settings */
        unsigned int gpio_intr;         /* GPIO IRQ enable mask */
        unsigned long caps;             /* adapter capabilities */
        const struct mdio_ops *mdio_ops;        /* MDIO operations */
        const char *desc;               /* product description */
};

struct port_type_info {
        void (*phy_prep)(struct cphy *phy, struct adapter *adapter,
                         int phy_addr, const struct mdio_ops *ops);
        unsigned int caps;
        const char *desc;
};

struct mc5_stats {
        unsigned long parity_err;
        unsigned long active_rgn_full;
        unsigned long nfa_srch_err;
        unsigned long unknown_cmd;
        unsigned long reqq_parity_err;
        unsigned long dispq_parity_err;
        unsigned long del_act_empty;
};

struct mc7_stats {
        unsigned long corr_err;
        unsigned long uncorr_err;
        unsigned long parity_err;
        unsigned long addr_err;
};

struct mac_stats {
        u64 tx_octets;                  /* total # of octets in good frames */
        u64 tx_octets_bad;              /* total # of octets in error frames */
        u64 tx_frames;                  /* all good frames */
        u64 tx_mcast_frames;            /* good multicast frames */
        u64 tx_bcast_frames;            /* good broadcast frames */
        u64 tx_pause;                   /* # of transmitted pause frames */
        u64 tx_deferred;                /* frames with deferred transmissions */
        u64 tx_late_collisions;         /* # of late collisions */
        u64 tx_total_collisions;        /* # of total collisions */
        u64 tx_excess_collisions;       /* frame errors from excessive collisions */
        u64 tx_underrun;                /* # of Tx FIFO underruns */
        u64 tx_len_errs;                /* # of Tx length errors */
        u64 tx_mac_internal_errs;       /* # of internal MAC errors on Tx */
        u64 tx_excess_deferral;         /* # of frames with excessive deferral */
        u64 tx_fcs_errs;                /* # of frames with bad FCS */

        u64 tx_frames_64;               /* # of Tx frames in a particular range */
        u64 tx_frames_65_127;
        u64 tx_frames_128_255;
        u64 tx_frames_256_511;
        u64 tx_frames_512_1023;
        u64 tx_frames_1024_1518;
        u64 tx_frames_1519_max;

        u64 rx_octets;                  /* total # of octets in good frames */
        u64 rx_octets_bad;              /* total # of octets in error frames */
        u64 rx_frames;                  /* all good frames */
        u64 rx_mcast_frames;            /* good multicast frames */
        u64 rx_bcast_frames;            /* good broadcast frames */
        u64 rx_pause;                   /* # of received pause frames */
        u64 rx_fcs_errs;                /* # of received frames with bad FCS */
        u64 rx_align_errs;              /* alignment errors */
        u64 rx_symbol_errs;             /* symbol errors */
        u64 rx_data_errs;               /* data errors */
        u64 rx_sequence_errs;           /* sequence errors */
        u64 rx_runt;                    /* # of runt frames */
        u64 rx_jabber;                  /* # of jabber frames */
        u64 rx_short;                   /* # of short frames */
        u64 rx_too_long;                /* # of oversized frames */
        u64 rx_mac_internal_errs;       /* # of internal MAC errors on Rx */

        u64 rx_frames_64;               /* # of Rx frames in a particular range */
        u64 rx_frames_65_127;
        u64 rx_frames_128_255;
        u64 rx_frames_256_511;
        u64 rx_frames_512_1023;
        u64 rx_frames_1024_1518;
        u64 rx_frames_1519_max;

        u64 rx_cong_drops;              /* # of Rx drops due to SGE congestion */

        unsigned long tx_fifo_parity_err;
        unsigned long rx_fifo_parity_err;
        unsigned long tx_fifo_urun;
        unsigned long rx_fifo_ovfl;
        unsigned long serdes_signal_loss;
        unsigned long xaui_pcs_ctc_err;
        unsigned long xaui_pcs_align_change;
};

struct tp_mib_stats {
        u32 ipInReceive_hi;
        u32 ipInReceive_lo;
        u32 ipInHdrErrors_hi;
        u32 ipInHdrErrors_lo;
        u32 ipInAddrErrors_hi;
        u32 ipInAddrErrors_lo;
        u32 ipInUnknownProtos_hi;
        u32 ipInUnknownProtos_lo;
        u32 ipInDiscards_hi;
        u32 ipInDiscards_lo;
        u32 ipInDelivers_hi;
        u32 ipInDelivers_lo;
        u32 ipOutRequests_hi;
        u32 ipOutRequests_lo;
        u32 ipOutDiscards_hi;
        u32 ipOutDiscards_lo;
        u32 ipOutNoRoutes_hi;
        u32 ipOutNoRoutes_lo;
        u32 ipReasmTimeout;
        u32 ipReasmReqds;
        u32 ipReasmOKs;
        u32 ipReasmFails;

        u32 reserved[8];

        u32 tcpActiveOpens;
        u32 tcpPassiveOpens;
        u32 tcpAttemptFails;
        u32 tcpEstabResets;
        u32 tcpOutRsts;
        u32 tcpCurrEstab;
        u32 tcpInSegs_hi;
        u32 tcpInSegs_lo;
        u32 tcpOutSegs_hi;
        u32 tcpOutSegs_lo;
        u32 tcpRetransSeg_hi;
        u32 tcpRetransSeg_lo;
        u32 tcpInErrs_hi;
        u32 tcpInErrs_lo;
        u32 tcpRtoMin;
        u32 tcpRtoMax;
};

struct tp_params {
        unsigned int nchan;             /* # of channels */
        unsigned int pmrx_size;         /* total PMRX capacity */
        unsigned int pmtx_size;         /* total PMTX capacity */
        unsigned int cm_size;           /* total CM capacity */
        unsigned int chan_rx_size;      /* per channel Rx size */
        unsigned int chan_tx_size;      /* per channel Tx size */
        unsigned int rx_pg_size;        /* Rx page size */
        unsigned int tx_pg_size;        /* Tx page size */
        unsigned int rx_num_pgs;        /* # of Rx pages */
        unsigned int tx_num_pgs;        /* # of Tx pages */
        unsigned int ntimer_qs;         /* # of timer queues */
};

struct qset_params {                    /* SGE queue set parameters */
        unsigned int polling;           /* polling/interrupt service for rspq */
        unsigned int coalesce_usecs;    /* irq coalescing timer */
        unsigned int rspq_size;         /* # of entries in response queue */
        unsigned int fl_size;           /* # of entries in regular free list */
        unsigned int jumbo_size;        /* # of entries in jumbo free list */
        unsigned int txq_size[SGE_TXQ_PER_SET]; /* Tx queue sizes */
        unsigned int cong_thres;        /* FL congestion threshold */
};

struct sge_params {
        unsigned int max_pkt_size;      /* max offload pkt size */
        struct qset_params qset[SGE_QSETS];
};

struct mc5_params {
        unsigned int mode;              /* selects MC5 width */
        unsigned int nservers;          /* size of server region */
        unsigned int nfilters;          /* size of filter region */
        unsigned int nroutes;           /* size of routing region */
};

/* Default MC5 region sizes */
enum {
        DEFAULT_NSERVERS = 512,
        DEFAULT_NFILTERS = 128
};

/* MC5 modes, these must be non-0 */
enum {
        MC5_MODE_144_BIT = 1,
        MC5_MODE_72_BIT = 2
};

struct vpd_params {
        unsigned int cclk;
        unsigned int mclk;
        unsigned int uclk;
        unsigned int mdc;
        unsigned int mem_timing;
        u8 eth_base[6];
        u8 port_type[MAX_NPORTS];
        unsigned short xauicfg[2];
};

struct pci_params {
        unsigned int vpd_cap_addr;
        unsigned int pcie_cap_addr;
        unsigned short speed;
        unsigned char width;
        unsigned char variant;
};

enum {
        PCI_VARIANT_PCI,
        PCI_VARIANT_PCIX_MODE1_PARITY,
        PCI_VARIANT_PCIX_MODE1_ECC,
        PCI_VARIANT_PCIX_266_MODE2,
        PCI_VARIANT_PCIE
};

struct adapter_params {
        struct sge_params sge;
        struct mc5_params mc5;
        struct tp_params tp;
        struct vpd_params vpd;
        struct pci_params pci;

        const struct adapter_info *info;

        unsigned short mtus[NMTUS];
        unsigned short a_wnd[NCCTRL_WIN];
        unsigned short b_wnd[NCCTRL_WIN];

        unsigned int nports;            /* # of ethernet ports */
        unsigned int stats_update_period;       /* MAC stats accumulation period */
        unsigned int linkpoll_period;   /* link poll period in 0.1s */
        unsigned int rev;               /* chip revision */
};

struct trace_params {
        u32 sip;
        u32 sip_mask;
        u32 dip;
        u32 dip_mask;
        u16 sport;
        u16 sport_mask;
        u16 dport;
        u16 dport_mask;
        u32 vlan:12;
        u32 vlan_mask:12;
        u32 intf:4;
        u32 intf_mask:4;
        u8 proto;
        u8 proto_mask;
};

struct link_config {
        unsigned int supported;         /* link capabilities */
        unsigned int advertising;       /* advertised capabilities */
        unsigned short requested_speed; /* speed user has requested */
        unsigned short speed;           /* actual link speed */
        unsigned char requested_duplex; /* duplex user has requested */
        unsigned char duplex;           /* actual link duplex */
        unsigned char requested_fc;     /* flow control user has requested */
        unsigned char fc;               /* actual link flow control */
        unsigned char autoneg;          /* autonegotiating? */
        unsigned int link_ok;           /* link up? */
};

#define SPEED_INVALID 0xffff
#define DUPLEX_INVALID 0xff

struct mc5 {
        struct adapter *adapter;
        unsigned int tcam_size;
        unsigned char part_type;
        unsigned char parity_enabled;
        unsigned char mode;
        struct mc5_stats stats;
};

static inline unsigned int t3_mc5_size(const struct mc5 *p)
{
        return p->tcam_size;
}

struct mc7 {
        struct adapter *adapter;        /* backpointer to adapter */
        unsigned int size;              /* memory size in bytes */
        unsigned int width;             /* MC7 interface width */
        unsigned int offset;            /* register address offset for MC7 instance */
        const char *name;               /* name of MC7 instance */
        struct mc7_stats stats;         /* MC7 statistics */
};

static inline unsigned int t3_mc7_size(const struct mc7 *p)
{
        return p->size;
}

struct cmac {
        struct adapter *adapter;
        unsigned int offset;
        unsigned int nucast;            /* # of address filters for unicast MACs */
        struct mac_stats stats;
};

enum {
        MAC_DIRECTION_RX = 1,
        MAC_DIRECTION_TX = 2,
        MAC_RXFIFO_SIZE = 32768
};

/* IEEE 802.3ae specified MDIO devices */
enum {
        MDIO_DEV_PMA_PMD = 1,
        MDIO_DEV_WIS = 2,
        MDIO_DEV_PCS = 3,
        MDIO_DEV_XGXS = 4
};

/* PHY loopback direction */
enum {
        PHY_LOOPBACK_TX = 1,
        PHY_LOOPBACK_RX = 2
};

/* PHY interrupt types */
enum {
        cphy_cause_link_change = 1,
        cphy_cause_fifo_error = 2
};

/* PHY operations */
struct cphy_ops {
        void (*destroy)(struct cphy *phy);
        int (*reset)(struct cphy *phy, int wait);

        int (*intr_enable)(struct cphy *phy);
        int (*intr_disable)(struct cphy *phy);
        int (*intr_clear)(struct cphy *phy);
        int (*intr_handler)(struct cphy *phy);

        int (*autoneg_enable)(struct cphy *phy);
        int (*autoneg_restart)(struct cphy *phy);

        int (*advertise)(struct cphy *phy, unsigned int advertise_map);
        int (*set_loopback)(struct cphy *phy, int mmd, int dir, int enable);
        int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
        int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
                               int *duplex, int *fc);
        int (*power_down)(struct cphy *phy, int enable);
};

/* A PHY instance */
struct cphy {
        int addr;                       /* PHY address */
        struct adapter *adapter;        /* associated adapter */
        unsigned long fifo_errors;      /* FIFO over/under-flows */
        const struct cphy_ops *ops;     /* PHY operations */
        int (*mdio_read)(struct adapter *adapter, int phy_addr, int mmd_addr,
                         int reg_addr, unsigned int *val);
        int (*mdio_write)(struct adapter *adapter, int phy_addr, int mmd_addr,
                          int reg_addr, unsigned int val);
};

/* Convenience MDIO read/write wrappers */
static inline int mdio_read(struct cphy *phy, int mmd, int reg,
                            unsigned int *valp)
{
        return phy->mdio_read(phy->adapter, phy->addr, mmd, reg, valp);
}

static inline int mdio_write(struct cphy *phy, int mmd, int reg,
                             unsigned int val)
{
        return phy->mdio_write(phy->adapter, phy->addr, mmd, reg, val);
}
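
/*
 * Usage sketch (illustrative only; "reg" and the bit value are placeholders):
 * read-modify-write a PMA/PMD register through the per-PHY MDIO hooks that
 * cphy_init() below installs:
 *
 *	unsigned int v;
 *
 *	if (!mdio_read(phy, MDIO_DEV_PMA_PMD, reg, &v))
 *		mdio_write(phy, MDIO_DEV_PMA_PMD, reg, v | 1);
 */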

/* Convenience initializer */
static inline void cphy_init(struct cphy *phy, struct adapter *adapter,
                             int phy_addr, struct cphy_ops *phy_ops,
                             const struct mdio_ops *mdio_ops)
{
        phy->adapter = adapter;
        phy->addr = phy_addr;
        phy->ops = phy_ops;
        if (mdio_ops) {
                phy->mdio_read = mdio_ops->read;
                phy->mdio_write = mdio_ops->write;
        }
}
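
/*
 * Sketch of how a PHY "prep" routine (see the t3_*_phy_prep() declarations at
 * the end of this file) is expected to use cphy_init(); the ops table and
 * function names here are illustrative, not real driver symbols:
 *
 *	static struct cphy_ops example_phy_ops = {
 *		.reset = example_reset,
 *		...
 *	};
 *
 *	void example_phy_prep(struct cphy *phy, struct adapter *adapter,
 *			      int phy_addr, const struct mdio_ops *mdio_ops)
 *	{
 *		cphy_init(phy, adapter, phy_addr, &example_phy_ops, mdio_ops);
 *	}
 */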

/* Accumulate MAC statistics every 180 seconds. For 1G we multiply by 10. */
#define MAC_STATS_ACCUM_SECS 180

#define XGM_REG(reg_addr, idx) \
        ((reg_addr) + (idx) * (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR))
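
/*
 * For example, XGM_REG(reg, 0) is simply "reg" within XGMAC0's register
 * block, while XGM_REG(reg, 1) is the same register offset shifted into
 * XGMAC1's block by the per-MAC address stride.
 */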

struct addr_val_pair {
        unsigned int reg_addr;
        unsigned int val;
};
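
/*
 * Example (illustrative; the register names are placeholders): blocks of
 * register initializations are expressed as addr_val_pair tables and written
 * in one go with t3_write_regs(), declared further down:
 *
 *	static const struct addr_val_pair example_init[] = {
 *		{ EXAMPLE_REG_A, 0x1 },
 *		{ EXAMPLE_REG_B, 0xffff }
 *	};
 *
 *	t3_write_regs(adapter, example_init, ARRAY_SIZE(example_init), 0);
 */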

#include "adapter.h"

#ifndef PCI_VENDOR_ID_CHELSIO
# define PCI_VENDOR_ID_CHELSIO 0x1425
#endif

#define for_each_port(adapter, iter) \
        for (iter = 0; iter < (adapter)->params.nports; ++iter)
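
/*
 * Example: iterate over the ports present on this adapter, e.g. to enable
 * their interrupts (t3_port_intr_enable() is declared below):
 *
 *	int i;
 *
 *	for_each_port(adapter, i)
 *		t3_port_intr_enable(adapter, i);
 */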

#define adapter_info(adap) ((adap)->params.info)

static inline int uses_xaui(const struct adapter *adap)
{
        return adapter_info(adap)->caps & SUPPORTED_AUI;
}

static inline int is_10G(const struct adapter *adap)
{
        return adapter_info(adap)->caps & SUPPORTED_10000baseT_Full;
}

static inline int is_offload(const struct adapter *adap)
{
        return adapter_info(adap)->caps & SUPPORTED_OFFLOAD;
}

static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
        return adap->params.vpd.cclk / 1000;
}
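
/*
 * The division by 1000 implies the VPD cclk value is the core clock in kHz,
 * so the result is ticks per microsecond.  Example (illustrative): converting
 * a timeout in microseconds into core clock ticks for a hardware timer field:
 *
 *	unsigned int ticks = timeout_us * core_ticks_per_usec(adap);
 */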

static inline unsigned int is_pcie(const struct adapter *adap)
{
        return adap->params.pci.variant == PCI_VARIANT_PCIE;
}

void t3_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
                      u32 val);
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
                   int n, unsigned int offset);
int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
                        int polarity, int attempts, int delay, u32 *valp);
static inline int t3_wait_op_done(struct adapter *adapter, int reg, u32 mask,
                                  int polarity, int attempts, int delay)
{
        return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts,
                                   delay, NULL);
}
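
/*
 * Usage sketch (illustrative; the register and mask are placeholders, and the
 * polarity convention is assumed to be 1 = wait for the masked bits to be
 * set, 0 = wait for them to clear):
 *
 *	if (t3_wait_op_done(adapter, EXAMPLE_CMD_REG, EXAMPLE_BUSY_BIT, 0,
 *			    10, 5))
 *		CH_ERR(adapter, "command did not complete\n");
 */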
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
                        unsigned int set);
int t3_phy_reset(struct cphy *phy, int mmd, int wait);
int t3_phy_advertise(struct cphy *phy, unsigned int advert);
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex);

void t3_intr_enable(struct adapter *adapter);
void t3_intr_disable(struct adapter *adapter);
void t3_intr_clear(struct adapter *adapter);
void t3_port_intr_enable(struct adapter *adapter, int idx);
void t3_port_intr_disable(struct adapter *adapter, int idx);
void t3_port_intr_clear(struct adapter *adapter, int idx);
int t3_slow_intr_handler(struct adapter *adapter);
int t3_phy_intr_handler(struct adapter *adapter);

void t3_link_changed(struct adapter *adapter, int port_id);
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
int t3_seeprom_wp(struct adapter *adapter, int enable);
int t3_read_flash(struct adapter *adapter, unsigned int addr,
                  unsigned int nwords, u32 *data, int byte_oriented);
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
int t3_get_fw_version(struct adapter *adapter, u32 *vers);
int t3_check_fw_version(struct adapter *adapter);
int t3_init_hw(struct adapter *adapter, u32 fw_params);
void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
                    int reset);
void t3_led_ready(struct adapter *adapter);
void t3_fatal_err(struct adapter *adapter);
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
                   const u8 *cpus, const u16 *rspq);
int t3_read_rss(struct adapter *adapter, u8 *lkup, u16 *map);
int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask);
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
                        unsigned int n, unsigned int *valp);
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
                   u64 *buf);

int t3_mac_reset(struct cmac *mac);
void t3b_pcs_reset(struct cmac *mac);
int t3_mac_enable(struct cmac *mac, int which);
int t3_mac_disable(struct cmac *mac, int which);
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm);
int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
int t3_mac_set_num_ucast(struct cmac *mac, int n);
const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);

void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode);
int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
                unsigned int nroutes);
void t3_mc5_intr_handler(struct mc5 *mc5);
int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start, unsigned int n,
                      u32 *buf);

int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh);
void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size);
void t3_tp_set_offload_mode(struct adapter *adap, int enable);
void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps);
void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
                  unsigned short alpha[NCCTRL_WIN],
                  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap);
void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS]);
void t3_get_cong_cntl_tab(struct adapter *adap,
                          unsigned short incr[NMTUS][NCCTRL_WIN]);
void t3_config_trace_filter(struct adapter *adapter,
                            const struct trace_params *tp, int filter_index,
                            int invert, int enable);
int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched);

void t3_sge_prep(struct adapter *adap, struct sge_params *p);
void t3_sge_init(struct adapter *adap, struct sge_params *p);
int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
                       enum sge_context_type type, int respq, u64 base_addr,
                       unsigned int size, unsigned int token, int gen,
                       unsigned int cidx);
int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
                        int gts_enable, u64 base_addr, unsigned int size,
                        unsigned int esize, unsigned int cong_thres, int gen,
                        unsigned int cidx);
int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
                         int irq_vec_idx, u64 base_addr, unsigned int size,
                         unsigned int fl_thres, int gen, unsigned int cidx);
int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
                        unsigned int size, int rspq, int ovfl_mode,
                        unsigned int credits, unsigned int credit_thres);
int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable);
int t3_sge_disable_fl(struct adapter *adapter, unsigned int id);
int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id);
int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id);
int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4]);
int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4]);
int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4]);
int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4]);
int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
                      unsigned int credits);

void t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
                         int phy_addr, const struct mdio_ops *mdio_ops);
void t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
                         int phy_addr, const struct mdio_ops *mdio_ops);
void t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
                         int phy_addr, const struct mdio_ops *mdio_ops);
void t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
                        const struct mdio_ops *mdio_ops);
void t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
                             int phy_addr, const struct mdio_ops *mdio_ops);
#endif /* __CHELSIO_COMMON_H */
709#endif /* __CHELSIO_COMMON_H */