Divy Le Ray | 4d22de3 | 2007-01-18 22:04:14 -0500 | [diff] [blame] | 1 | /* |
Divy Le Ray | 1d68e93 | 2007-01-30 19:44:35 -0800 | [diff] [blame] | 2 | * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved. |
Divy Le Ray | 4d22de3 | 2007-01-18 22:04:14 -0500 | [diff] [blame] | 3 | * |
Divy Le Ray | 1d68e93 | 2007-01-30 19:44:35 -0800 | [diff] [blame] | 4 | * This software is available to you under a choice of one of two |
| 5 | * licenses. You may choose to be licensed under the terms of the GNU |
| 6 | * General Public License (GPL) Version 2, available from the file |
| 7 | * COPYING in the main directory of this source tree, or the |
| 8 | * OpenIB.org BSD license below: |
Divy Le Ray | 4d22de3 | 2007-01-18 22:04:14 -0500 | [diff] [blame] | 9 | * |
Divy Le Ray | 1d68e93 | 2007-01-30 19:44:35 -0800 | [diff] [blame] | 10 | * Redistribution and use in source and binary forms, with or |
| 11 | * without modification, are permitted provided that the following |
| 12 | * conditions are met: |
| 13 | * |
| 14 | * - Redistributions of source code must retain the above |
| 15 | * copyright notice, this list of conditions and the following |
| 16 | * disclaimer. |
| 17 | * |
| 18 | * - Redistributions in binary form must reproduce the above |
| 19 | * copyright notice, this list of conditions and the following |
| 20 | * disclaimer in the documentation and/or other materials |
| 21 | * provided with the distribution. |
| 22 | * |
| 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
| 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
| 25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
| 26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
| 27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
| 28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
| 29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| 30 | * SOFTWARE. |
Divy Le Ray | 4d22de3 | 2007-01-18 22:04:14 -0500 | [diff] [blame] | 31 | */ |
| 32 | |
| 33 | /* This file should not be included directly. Include common.h instead. */ |
| 34 | |
| 35 | #ifndef __T3_ADAPTER_H__ |
| 36 | #define __T3_ADAPTER_H__ |
| 37 | |
| 38 | #include <linux/pci.h> |
| 39 | #include <linux/spinlock.h> |
| 40 | #include <linux/interrupt.h> |
| 41 | #include <linux/timer.h> |
| 42 | #include <linux/cache.h> |
Divy Le Ray | a13fbee | 2007-01-30 19:44:29 -0800 | [diff] [blame] | 43 | #include <linux/mutex.h> |
Divy Le Ray | 4d22de3 | 2007-01-18 22:04:14 -0500 | [diff] [blame] | 44 | #include "t3cdev.h" |
| 45 | #include <asm/semaphore.h> |
| 46 | #include <asm/bitops.h> |
| 47 | #include <asm/io.h> |
| 48 | |
/* Signature of a top-level interrupt service routine (irq number, cookie). */
typedef irqreturn_t(*intr_handler_t) (int, void *);

struct vlan_group;	/* opaque; defined by the VLAN layer */
| 52 | |
/*
 * Per-port state, stored in the private area of each port's net_device
 * (see adap2pinfo() below).
 */
struct port_info {
	struct vlan_group *vlan_grp;	/* VLAN group for HW VLAN handling */
	const struct port_type_info *port_type;
	u8 port_id;		/* index of this port on the adapter */
	u8 rx_csum_offload;	/* non-zero if RX checksum offload is enabled */
	u8 nqsets;		/* # of SGE queue sets serving this port */
	u8 first_qset;		/* index of the port's first queue set */
	struct cphy phy;	/* PHY state */
	struct cmac mac;	/* MAC state */
	struct link_config link_config;
	struct net_device_stats netstats;	/* interface statistics */
	int activity;		/* NOTE(review): presumably an activity flag/counter
				 * for the port LED or watchdog — confirm at callers */
};
| 66 | |
enum {				/* adapter flags (struct adapter::flags) */
	FULL_INIT_DONE = (1 << 0),	/* one-time HW/SW init has completed */
	USING_MSI = (1 << 1),		/* interrupts are delivered via MSI */
	USING_MSIX = (1 << 2),		/* interrupts are delivered via MSI-X */
	QUEUES_BOUND = (1 << 3),	/* queue sets bound to interrupts */
};
| 73 | |
struct rx_desc;		/* HW Rx descriptor, defined by the SGE code */
struct rx_sw_desc;	/* SW bookkeeping for one Rx buffer */

struct sge_fl {			/* SGE per free-buffer list state */
	unsigned int buf_size;	/* size of each Rx buffer */
	unsigned int credits;	/* # of available Rx buffers */
	unsigned int size;	/* capacity of free list */
	unsigned int cidx;	/* consumer index */
	unsigned int pidx;	/* producer index */
	unsigned int gen;	/* free list generation */
	struct rx_desc *desc;	/* address of HW Rx descriptor ring */
	struct rx_sw_desc *sdesc;	/* address of SW Rx descriptor ring */
	dma_addr_t phys_addr;	/* physical address of HW ring start */
	unsigned int cntxt_id;	/* SGE context id for the free list */
	unsigned long empty;	/* # of times queue ran out of buffers */
};
| 90 | |
/*
 * Bundle size for grouping offload RX packets for delivery to the stack.
 * Don't make this too big as we do prefetch on each packet in a bundle.
 */
# define RX_BUNDLE_SIZE 8

struct rsp_desc;	/* HW response descriptor, defined by the SGE code */

struct sge_rspq {		/* state for an SGE response queue */
	unsigned int credits;	/* # of pending response credits */
	unsigned int size;	/* capacity of response queue */
	unsigned int cidx;	/* consumer index */
	unsigned int gen;	/* current generation bit */
	unsigned int polling;	/* is the queue serviced through NAPI? */
	unsigned int holdoff_tmr;	/* interrupt holdoff timer in 100ns */
	unsigned int next_holdoff;	/* holdoff time for next interrupt */
	struct rsp_desc *desc;	/* address of HW response ring */
	dma_addr_t phys_addr;	/* physical address of the ring */
	unsigned int cntxt_id;	/* SGE context id for the response q */
	spinlock_t lock;	/* guards response processing */
	struct sk_buff *rx_head;	/* offload packet receive queue head */
	struct sk_buff *rx_tail;	/* offload packet receive queue tail */

	/* statistics */
	unsigned long offload_pkts;	/* # of offload packets received */
	unsigned long offload_bundles;	/* # of offload packet bundles delivered */
	unsigned long eth_pkts;	/* # of ethernet packets */
	unsigned long pure_rsps;	/* # of pure (non-data) responses */
	unsigned long imm_data;	/* responses with immediate data */
	unsigned long rx_drops;	/* # of packets dropped due to no mem */
	unsigned long async_notif; /* # of asynchronous notification events */
	unsigned long empty;	/* # of times queue ran out of credits */
	unsigned long nomem;	/* # of responses deferred due to no mem */
	unsigned long unhandled_irqs;	/* # of spurious intrs */
};
| 125 | |
struct tx_desc;		/* HW Tx descriptor, defined by the SGE code */
struct tx_sw_desc;	/* SW bookkeeping for one Tx descriptor */

struct sge_txq {		/* state for an SGE Tx queue */
	unsigned long flags;	/* HW DMA fetch status */
	unsigned int in_use;	/* # of in-use Tx descriptors */
	unsigned int size;	/* # of descriptors */
	unsigned int processed;	/* total # of descs HW has processed */
	unsigned int cleaned;	/* total # of descs SW has reclaimed */
	unsigned int stop_thres;	/* SW TX queue suspend threshold */
	unsigned int cidx;	/* consumer index */
	unsigned int pidx;	/* producer index */
	unsigned int gen;	/* current value of generation bit */
	unsigned int unacked;	/* Tx descriptors used since last COMPL */
	struct tx_desc *desc;	/* address of HW Tx descriptor ring */
	struct tx_sw_desc *sdesc;	/* address of SW Tx descriptor ring */
	spinlock_t lock;	/* guards enqueueing of new packets */
	unsigned int token;	/* WR token */
	dma_addr_t phys_addr;	/* physical address of the ring */
	struct sk_buff_head sendq;	/* List of backpressured offload packets */
	struct tasklet_struct qresume_tsk;	/* restarts the queue */
	unsigned int cntxt_id;	/* SGE context id for the Tx q */
	unsigned long stops;	/* # of times q has been stopped */
	unsigned long restarts;	/* # of queue restarts */
};
| 151 | |
enum {				/* per port SGE statistics (port_stats[] indices) */
	SGE_PSTAT_TSO,		/* # of TSO requests */
	SGE_PSTAT_RX_CSUM_GOOD,	/* # of successful RX csum offloads */
	SGE_PSTAT_TX_CSUM,	/* # of TX checksum offloads */
	SGE_PSTAT_VLANEX,	/* # of VLAN tag extractions */
	SGE_PSTAT_VLANINS,	/* # of VLAN tag insertions */

	SGE_PSTAT_MAX		/* must be last */
};
| 161 | |
/*
 * An SGE queue set: one response queue plus its associated free lists and
 * Tx queues.  Cacheline-aligned to keep the hot per-queue state from
 * sharing lines across queue sets.
 */
struct sge_qset {		/* an SGE queue set */
	struct sge_rspq rspq;
	struct sge_fl fl[SGE_RXQ_PER_SET];
	struct sge_txq txq[SGE_TXQ_PER_SET];
	struct net_device *netdev;	/* associated net device */
	unsigned long txq_stopped;	/* which Tx queues are stopped */
	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
	unsigned long port_stats[SGE_PSTAT_MAX];	/* see SGE_PSTAT_* above */
} ____cacheline_aligned;
| 171 | |
/* Top-level SGE state: all queue sets plus the shared register lock. */
struct sge {
	struct sge_qset qs[SGE_QSETS];
	spinlock_t reg_lock;	/* guards non-atomic SGE registers (eg context) */
};
| 176 | |
/*
 * Per-adapter (PCI function) state.  Embeds the generic offload device
 * (tdev) so offload code can recover the adapter via tdev2adap().
 */
struct adapter {
	struct t3cdev tdev;		/* embedded offload device */
	struct list_head adapter_list;	/* linkage on the global adapter list */
	void __iomem *regs;		/* mapped BAR0 register space */
	struct pci_dev *pdev;
	unsigned long registered_device_map;	/* ports registered w/ the stack */
	unsigned long open_device_map;	/* open ports; bit OFFLOAD_DEVMAP_BIT
					 * tracks the offload device */
	unsigned long flags;		/* FULL_INIT_DONE etc., see enum above */

	const char *name;		/* adapter name for messages */
	int msg_enable;			/* debug message level */
	unsigned int mmio_len;		/* length of mapped register space */

	struct adapter_params params;	/* adapter-wide HW parameters */
	unsigned int slow_intr_mask;	/* non-data interrupt sources to enable */
	unsigned long irq_stats[IRQ_NUM_STATS];

	struct {			/* per-vector MSI-X info */
		unsigned short vec;	/* interrupt vector number */
		char desc[22];		/* name shown in /proc/interrupts */
	} msix_info[SGE_QSETS + 1];	/* one per qset + one for slow intrs */

	/* T3 modules */
	struct sge sge;
	struct mc7 pmrx;
	struct mc7 pmtx;
	struct mc7 cm;
	struct mc5 mc5;

	struct net_device *port[MAX_NPORTS];	/* the adapter's net devices */
	unsigned int check_task_cnt;	/* # of adap_check_task invocations */
	struct delayed_work adap_check_task;	/* periodic adapter check */
	struct work_struct ext_intr_handler_task;	/* defers ext intr work */

	/*
	 * Dummy netdevices are needed when using multiple receive queues with
	 * NAPI as each netdevice can service only one queue.
	 */
	struct net_device *dummy_netdev[SGE_QSETS - 1];

	struct dentry *debugfs_root;	/* debugfs directory for this adapter */

	struct mutex mdio_lock;		/* serializes MDIO (PHY) access */
	spinlock_t stats_lock;		/* guards statistics collection */
	spinlock_t work_lock;		/* guards deferred-work scheduling */
};
| 223 | |
| 224 | static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr) |
| 225 | { |
| 226 | u32 val = readl(adapter->regs + reg_addr); |
| 227 | |
| 228 | CH_DBG(adapter, MMIO, "read register 0x%x value 0x%x\n", reg_addr, val); |
| 229 | return val; |
| 230 | } |
| 231 | |
| 232 | static inline void t3_write_reg(struct adapter *adapter, u32 reg_addr, u32 val) |
| 233 | { |
| 234 | CH_DBG(adapter, MMIO, "setting register 0x%x to 0x%x\n", reg_addr, val); |
| 235 | writel(val, adapter->regs + reg_addr); |
| 236 | } |
| 237 | |
| 238 | static inline struct port_info *adap2pinfo(struct adapter *adap, int idx) |
| 239 | { |
| 240 | return netdev_priv(adap->port[idx]); |
| 241 | } |
| 242 | |
/*
 * We use the spare atalk_ptr to map a net device to its SGE queue set.
 * This is a macro so it can be used as l-value.
 */
#define dev2qset(netdev) ((netdev)->atalk_ptr)

/* Bit in open_device_map that tracks whether the offload device is open */
#define OFFLOAD_DEVMAP_BIT 15

/* Recover the adapter that embeds a given t3cdev */
#define tdev2adap(d) container_of(d, struct adapter, tdev)
| 252 | |
| 253 | static inline int offload_running(struct adapter *adapter) |
| 254 | { |
| 255 | return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map); |
| 256 | } |
| 257 | |
/* Transmit an offload packet through the offload device */
int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb);

/* OS callbacks invoked from the common (OS-independent) code */
void t3_os_ext_intr_handler(struct adapter *adapter);
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_status,
			int speed, int duplex, int fc);

/* SGE (DMA engine) control, setup, and data-path entry points */
void t3_sge_start(struct adapter *adap);
void t3_sge_stop(struct adapter *adap);
void t3_free_sge_resources(struct adapter *adap);
void t3_sge_err_intr_handler(struct adapter *adapter);
intr_handler_t t3_intr_handler(struct adapter *adap, int polling);
int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev);
int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
		      int irq_vec_idx, const struct qset_params *p,
		      int ntxq, struct net_device *netdev);
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
		unsigned char *data);
irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
| 278 | |
| 279 | #endif /* __T3_ADAPTER_H__ */ |