/*
 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
 * driver for Linux.
 *
 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * This file should not be included directly.  Include t4vf_common.h instead.
 */

#ifndef __CXGB4VF_ADAPTER_H__
#define __CXGB4VF_ADAPTER_H__

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>

#include "../cxgb4/t4_hw.h"

/*
 * Constants of the implementation.
 */
enum {
        MAX_NPORTS = 1,                 /* max # of "ports" */
        MAX_PORT_QSETS = 8,             /* max # of Queue Sets / "port" */
        MAX_ETH_QSETS = MAX_NPORTS*MAX_PORT_QSETS,

        /*
         * MSI-X interrupt index usage.
         */
        MSIX_FW = 0,                    /* MSI-X index for firmware Q */
        MSIX_IQFLINT = 1,               /* MSI-X index base for Ingress Qs */
        MSIX_EXTRAS = 1,
        MSIX_ENTRIES = MAX_ETH_QSETS + MSIX_EXTRAS,

        /*
         * The maximum number of Ingress and Egress Queues is determined by
         * the maximum number of "Queue Sets" which we support plus any
         * ancillary queues.  Each "Queue Set" requires one Ingress Queue
         * for RX Packet Ingress Event notifications and two Egress Queues
         * for a Free List and an Ethernet TX list.
         */
        INGQ_EXTRAS = 2,                /* firmware event queue and */
                                        /*   forwarded interrupts */
        MAX_INGQ = MAX_ETH_QSETS+INGQ_EXTRAS,
        MAX_EGRQ = MAX_ETH_QSETS*2,
};
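
/*
 * With the values above (a quick sanity check of the arithmetic): one "port"
 * with up to 8 Queue Sets gives MAX_ETH_QSETS = 8, so we need
 * MSIX_ENTRIES = 9 MSI-X vectors (one for the firmware event queue plus one
 * per Queue Set), MAX_INGQ = 10 Ingress Queues (8 Queue Sets plus the
 * firmware event queue and the forwarded-interrupt queue) and
 * MAX_EGRQ = 16 Egress Queues (a Free List and a TX Queue per Queue Set).
 */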

/*
 * Forward structure definition references.
 */
struct adapter;
struct sge_eth_rxq;
struct sge_rspq;

/*
 * Per-"port" information.  This is really per-Virtual Interface information
 * but the use of the "port" nomenclature makes it easier to go back and forth
 * between the PF and VF drivers ...
 */
struct port_info {
        struct adapter *adapter;        /* our adapter */
        struct vlan_group *vlan_grp;    /* our VLAN group */
        u16 viid;                       /* virtual interface ID */
        s16 xact_addr_filt;             /* index of our MAC address filter */
        u16 rss_size;                   /* size of VI's RSS table slice */
        u8 pidx;                        /* index into adapter port[] */
        u8 port_id;                     /* physical port ID */
        u8 nqsets;                      /* # of "Queue Sets" */
        u8 first_qset;                  /* index of first "Queue Set" */
        struct link_config link_cfg;    /* physical port configuration */
};

/*
 * Scatter Gather Engine resources for the "adapter".  Our ingress and egress
 * queues are organized into "Queue Sets" with one ingress and one egress
 * queue per Queue Set.  These Queue Sets are apportionable between the
 * "ports" (Virtual Interfaces).  One extra ingress queue is used to receive
 * asynchronous messages from the firmware.  Note that the "Queue IDs" that we
 * use here are really "Relative Queue IDs" which are returned as part of the
 * firmware command to allocate queues.  These queue IDs are relative to the
 * absolute Queue ID base of the section of the Queue ID space allocated to
 * the PF/VF.
 */

/*
 * SGE free-list queue state.
 */
struct rx_sw_desc;
struct sge_fl {
        unsigned int avail;             /* # of available RX buffers */
        unsigned int pend_cred;         /* new buffers since last FL DB ring */
        unsigned int cidx;              /* consumer index */
        unsigned int pidx;              /* producer index */
        unsigned long alloc_failed;     /* # of buffer allocation failures */
        unsigned long large_alloc_failed;
        unsigned long starving;         /* # of times FL was found starving */

        /*
         * Write-once/infrequently fields.
         * -------------------------------
         */

        unsigned int cntxt_id;          /* SGE relative QID for the free list */
        unsigned int abs_id;            /* SGE absolute QID for the free list */
        unsigned int size;              /* capacity of free list */
        struct rx_sw_desc *sdesc;       /* address of SW RX descriptor ring */
        __be64 *desc;                   /* address of HW RX descriptor ring */
        dma_addr_t addr;                /* PCI bus address of hardware ring */
};

/*
 * An ingress packet gather list.
 */
struct pkt_gl {
        skb_frag_t frags[MAX_SKB_FRAGS];
        void *va;                       /* virtual address of first byte */
        unsigned int nfrags;            /* # of fragments */
        unsigned int tot_len;           /* total length of fragments */
};

typedef int (*rspq_handler_t)(struct sge_rspq *, const __be64 *,
                              const struct pkt_gl *);

/*
 * State for an SGE Response Queue.
 */
struct sge_rspq {
        struct napi_struct napi;        /* NAPI scheduling control */
        const __be64 *cur_desc;         /* current descriptor in queue */
        unsigned int cidx;              /* consumer index */
        u8 gen;                         /* current generation bit */
        u8 next_intr_params;            /* holdoff params for next interrupt */
        int offset;                     /* offset into current FL buffer */

        unsigned int unhandled_irqs;    /* bogus interrupts */

        /*
         * Write-once/infrequently fields.
         * -------------------------------
         */

        u8 intr_params;                 /* interrupt holdoff parameters */
        u8 pktcnt_idx;                  /* interrupt packet threshold */
        u8 idx;                         /* queue index within its group */
        u16 cntxt_id;                   /* SGE rel QID for the response Q */
        u16 abs_id;                     /* SGE abs QID for the response Q */
        __be64 *desc;                   /* address of hardware response ring */
        dma_addr_t phys_addr;           /* PCI bus address of ring */
        unsigned int iqe_len;           /* entry size */
        unsigned int size;              /* capacity of response Q */
        struct adapter *adapter;        /* our adapter */
        struct net_device *netdev;      /* associated net device */
        rspq_handler_t handler;         /* the handler for this response Q */
};

/*
 * Ethernet queue statistics
 */
struct sge_eth_stats {
        unsigned long pkts;             /* # of ethernet packets */
        unsigned long lro_pkts;         /* # of LRO super packets */
        unsigned long lro_merged;       /* # of wire packets merged by LRO */
        unsigned long rx_cso;           /* # of Rx checksum offloads */
        unsigned long vlan_ex;          /* # of Rx VLAN extractions */
        unsigned long rx_drops;         /* # of packets dropped due to no mem */
};

/*
 * State for an Ethernet Receive Queue.
 */
struct sge_eth_rxq {
        struct sge_rspq rspq;           /* Response Queue */
        struct sge_fl fl;               /* Free List */
        struct sge_eth_stats stats;     /* receive statistics */
};

/*
 * SGE Transmit Queue state.  This contains all of the resources associated
 * with the hardware status of a TX Queue which is a circular ring of hardware
 * TX Descriptors.  For convenience, it also contains a pointer to a parallel
 * "Software Descriptor" array but we don't know anything about it here other
 * than its type name.
 */
struct tx_desc {
        /*
         * Egress Queues are measured in units of SGE_EQ_IDXSIZE by the
         * hardware: Sizes, Producer and Consumer indices, etc.
         */
        __be64 flit[SGE_EQ_IDXSIZE/sizeof(__be64)];
};
struct tx_sw_desc;
struct sge_txq {
        unsigned int in_use;            /* # of in-use TX descriptors */
        unsigned int size;              /* # of descriptors */
        unsigned int cidx;              /* SW consumer index */
        unsigned int pidx;              /* producer index */
        unsigned long stops;            /* # of times queue has been stopped */
        unsigned long restarts;         /* # of queue restarts */

        /*
         * Write-once/infrequently fields.
         * -------------------------------
         */

        unsigned int cntxt_id;          /* SGE relative QID for the TX Q */
        unsigned int abs_id;            /* SGE absolute QID for the TX Q */
        struct tx_desc *desc;           /* address of HW TX descriptor ring */
        struct tx_sw_desc *sdesc;       /* address of SW TX descriptor ring */
        struct sge_qstat *stat;         /* queue status entry */
        dma_addr_t phys_addr;           /* PCI bus address of hardware ring */
};

/*
 * State for an Ethernet Transmit Queue.
 */
struct sge_eth_txq {
        struct sge_txq q;               /* SGE TX Queue */
        struct netdev_queue *txq;       /* associated netdev TX queue */
        unsigned long tso;              /* # of TSO requests */
        unsigned long tx_cso;           /* # of TX checksum offloads */
        unsigned long vlan_ins;         /* # of TX VLAN insertions */
        unsigned long mapping_err;      /* # of I/O MMU packet mapping errors */
};

/*
 * The complete set of Scatter/Gather Engine resources.
 */
struct sge {
        /*
         * Our "Queue Sets" ...
         */
        struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
        struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];

        /*
         * Extra ingress queues for asynchronous firmware events and
         * forwarded interrupts (when in MSI mode).
         */
        struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;

        struct sge_rspq intrq ____cacheline_aligned_in_smp;
        spinlock_t intrq_lock;

        /*
         * State for managing "starving Free Lists" -- Free Lists which have
         * fallen below a certain threshold of buffers available to the
         * hardware and attempts to refill them up to that threshold have
         * failed.  We have a regular "slow tick" timer process which will
         * make periodic attempts to refill these starving Free Lists ...
         */
        DECLARE_BITMAP(starving_fl, MAX_EGRQ);
        struct timer_list rx_timer;

        /*
         * State for cleaning up completed TX descriptors.
         */
        struct timer_list tx_timer;

        /*
         * Write-once/infrequently fields.
         * -------------------------------
         */

        u16 max_ethqsets;               /* # of available Ethernet queue sets */
        u16 ethqsets;                   /* # of active Ethernet queue sets */
        u16 ethtxq_rover;               /* Tx queue to clean up next */
        u16 timer_val[SGE_NTIMERS];     /* interrupt holdoff timer array */
        u8 counter_val[SGE_NCOUNTERS];  /* interrupt RX threshold array */

        /*
         * Reverse maps from Absolute Queue IDs to associated queue pointers.
         * The absolute Queue IDs are in a compact range which starts at a
         * [potentially large] Base Queue ID.  We perform the reverse map by
         * first converting the Absolute Queue ID into a Relative Queue ID by
         * subtracting off the Base Queue ID and then use a Relative Queue ID
         * indexed table to get the pointer to the corresponding software
         * queue structure.
         */
        unsigned int egr_base;
        unsigned int ingr_base;
        void *egr_map[MAX_EGRQ];
        struct sge_rspq *ingr_map[MAX_INGQ];
};

/*
 * Utility macros to convert Absolute Queue IDs into Relative Queue IDs for
 * Egress and Ingress Queues.  The EQ_MAP() and IQ_MAP() macros, which provide
 * pointers to Egress and Ingress Queues, can be used as both l-values and
 * r-values.
 */
#define EQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->egr_base))
#define IQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->ingr_base))

#define EQ_MAP(s, abs_id) ((s)->egr_map[EQ_IDX(s, abs_id)])
#define IQ_MAP(s, abs_id) ((s)->ingr_map[IQ_IDX(s, abs_id)])
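
/*
 * Illustrative use (a sketch, not part of the driver proper): to recover the
 * software state for an Ingress Queue from the Absolute Queue ID that the
 * hardware reports, subtract the Base Queue ID and index the reverse map,
 * which is exactly what IQ_MAP() does:
 *
 *      struct sge *s = &adapter->sge;
 *      struct sge_rspq *rspq = IQ_MAP(s, abs_qid);
 *
 * EQ_MAP() works the same way for Egress Queues, except that egr_map[] holds
 * void * entries, so the result is assigned to whichever specific queue type
 * (e.g. a struct sge_fl or struct sge_txq) was registered at that slot.
 */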

/*
 * Macro to iterate across Queue Sets ("rxq" is a historic misnomer).
 */
#define for_each_ethrxq(sge, iter) \
        for (iter = 0; iter < (sge)->ethqsets; iter++)
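
/*
 * Example use (a sketch with a hypothetical helper): walk every active
 * Ethernet "Queue Set" on the adapter:
 *
 *      struct sge *s = &adapter->sge;
 *      int qs;
 *
 *      for_each_ethrxq(s, qs)
 *              process_queue_set(&s->ethrxq[qs], &s->ethtxq[qs]);
 *
 * Note that the iterator covers only the ethqsets Queue Sets currently in
 * use, not the full MAX_ETH_QSETS capacity of the arrays.
 */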

/*
 * Per-"adapter" (Virtual Function) information.
 */
struct adapter {
        /* PCI resources */
        void __iomem *regs;
        struct pci_dev *pdev;
        struct device *pdev_dev;

        /* "adapter" resources */
        unsigned long registered_device_map;
        unsigned long open_device_map;
        unsigned long flags;
        struct adapter_params params;

        /* queue and interrupt resources */
        struct {
                unsigned short vec;
                char desc[22];
        } msix_info[MSIX_ENTRIES];
        struct sge sge;

        /* Linux network device resources */
        struct net_device *port[MAX_NPORTS];
        const char *name;
        unsigned int msg_enable;

        /* debugfs resources */
        struct dentry *debugfs_root;

        /* various locks */
        spinlock_t stats_lock;
};

enum {                                  /* adapter flags */
        FULL_INIT_DONE = (1UL << 0),
        USING_MSI = (1UL << 1),
        USING_MSIX = (1UL << 2),
        QUEUES_BOUND = (1UL << 3),
};
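
/*
 * Typical use of the flag bits (a sketch with hypothetical helpers): the
 * flags word in struct adapter is tested with simple bit operations, e.g.
 *
 *      if (adapter->flags & USING_MSIX)
 *              setup_msix_interrupts(adapter);
 *      else if (adapter->flags & USING_MSI)
 *              setup_msi_interrupt(adapter);
 *
 * so the interrupt path taken depends on which mode was enabled when the
 * adapter was brought up.
 */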

/*
 * The following register read/write routine definitions are required by
 * the common code.
 */

/**
 * t4_read_reg - read a HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 *
 * Returns the 32-bit value of the given HW register.
 */
static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr)
{
        return readl(adapter->regs + reg_addr);
}

/**
 * t4_write_reg - write a HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 * @val: the value to write
 *
 * Write a 32-bit value into the given HW register.
 */
static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
{
        writel(val, adapter->regs + reg_addr);
}

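/*
 * Fallback 64-bit MMIO accessors (a descriptive note): on platforms that do
 * not provide native readq()/writeq() (typically 32-bit architectures), a
 * 64-bit register access is emulated as two 32-bit accesses, low word first.
 * The two halves are therefore not read or written atomically.
 */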
#ifndef readq
static inline u64 readq(const volatile void __iomem *addr)
{
        return readl(addr) + ((u64)readl(addr + 4) << 32);
}

static inline void writeq(u64 val, volatile void __iomem *addr)
{
        writel(val, addr);
        writel(val >> 32, addr + 4);
}
#endif

/**
 * t4_read_reg64 - read a 64-bit HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 *
 * Returns the 64-bit value of the given HW register.
 */
static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr)
{
        return readq(adapter->regs + reg_addr);
}

/**
 * t4_write_reg64 - write a 64-bit HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 * @val: the value to write
 *
 * Write a 64-bit value into the given HW register.
 */
static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr,
                                  u64 val)
{
        writeq(val, adapter->regs + reg_addr);
}

/**
 * port_name - return the string name of a port
 * @adapter: the adapter
 * @pidx: the port index
 *
 * Return the string name of the selected port.
 */
static inline const char *port_name(struct adapter *adapter, int pidx)
{
        return adapter->port[pidx]->name;
}

/**
 * t4_os_set_hw_addr - store a port's MAC address in SW
 * @adapter: the adapter
 * @pidx: the port index
 * @hw_addr: the Ethernet address
 *
 * Store the Ethernet address of the given port in SW.  Called by the common
 * code when it retrieves a port's Ethernet address from EEPROM.
 */
static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx,
                                     u8 hw_addr[])
{
        memcpy(adapter->port[pidx]->dev_addr, hw_addr, ETH_ALEN);
        memcpy(adapter->port[pidx]->perm_addr, hw_addr, ETH_ALEN);
}

/**
 * netdev2pinfo - return the port_info structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct port_info associated with a net_device.
 */
static inline struct port_info *netdev2pinfo(const struct net_device *dev)
{
        return netdev_priv(dev);
}

/**
 * adap2pinfo - return the port_info of a port
 * @adapter: the adapter
 * @pidx: the port index
 *
 * Return the port_info structure for the specified port on the adapter.
 */
static inline struct port_info *adap2pinfo(struct adapter *adapter, int pidx)
{
        return netdev_priv(adapter->port[pidx]);
}

/**
 * netdev2adap - return the adapter structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct adapter associated with a net_device.
 */
static inline struct adapter *netdev2adap(const struct net_device *dev)
{
        return netdev2pinfo(dev)->adapter;
}
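
/*
 * Typical use (a sketch): inside a net_device operation these helpers recover
 * the driver-private state from the net_device handed in by the network
 * stack, e.g.
 *
 *      struct port_info *pi = netdev2pinfo(dev);
 *      struct adapter *adapter = netdev2adap(dev);
 *      struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset];
 *
 * since a port's Queue Sets start at first_qset within the adapter-wide
 * queue arrays.
 */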

/*
 * OS "Callback" function declarations.  These are functions that the OS code
 * is "contracted" to provide for the common code.
 */
void t4vf_os_link_changed(struct adapter *, int, int);

/*
 * SGE function prototype declarations.
 */
int t4vf_sge_alloc_rxq(struct adapter *, struct sge_rspq *, bool,
                       struct net_device *, int,
                       struct sge_fl *, rspq_handler_t);
int t4vf_sge_alloc_eth_txq(struct adapter *, struct sge_eth_txq *,
                           struct net_device *, struct netdev_queue *,
                           unsigned int);
void t4vf_free_sge_resources(struct adapter *);

int t4vf_eth_xmit(struct sk_buff *, struct net_device *);
int t4vf_ethrx_handler(struct sge_rspq *, const __be64 *,
                       const struct pkt_gl *);

irq_handler_t t4vf_intr_handler(struct adapter *);
irqreturn_t t4vf_sge_intr_msix(int, void *);

int t4vf_sge_init(struct adapter *);
void t4vf_sge_start(struct adapter *);
void t4vf_sge_stop(struct adapter *);

#endif /* __CXGB4VF_ADAPTER_H__ */