/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#ifndef EFX_EFX_H
#define EFX_EFX_H

#include "net_driver.h"
#include "filter.h"

/* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
#define EFX_MEM_BAR 2

/* TX */
extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
extern netdev_tx_t
efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
extern netdev_tx_t
efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
extern unsigned int efx_piobuf_size;

/* RX */
extern void efx_rx_config_page_split(struct efx_nic *efx);
extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
extern void efx_rx_slow_fill(unsigned long context);
extern void __efx_rx_packet(struct efx_channel *channel);
extern void efx_rx_packet(struct efx_rx_queue *rx_queue,
                          unsigned int index, unsigned int n_frags,
                          unsigned int len, u16 flags);
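/* Hand any packet currently buffered on the channel by efx_rx_packet()
 * (rx_pkt_n_frags != 0) to __efx_rx_packet() for delivery up the stack.
 */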
static inline void efx_rx_flush_packet(struct efx_channel *channel)
{
        if (channel->rx_pkt_n_frags)
                __efx_rx_packet(channel);
}
extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);

#define EFX_MAX_DMAQ_SIZE 4096UL
#define EFX_DEFAULT_DMAQ_SIZE 1024UL
#define EFX_MIN_DMAQ_SIZE 512UL

#define EFX_MAX_EVQ_SIZE 16384UL
#define EFX_MIN_EVQ_SIZE 512UL

/* Maximum number of TCP segments we support for soft-TSO */
#define EFX_TSO_MAX_SEGS	100

/* The smallest [rt]xq_entries that the driver supports.  RX minimum
 * is a bit arbitrary.  For TX, we must have space for at least 2
 * TSO skbs.
 */
#define EFX_RXQ_MIN_ENT		128U
#define EFX_TXQ_MIN_ENT(efx)	(2 * efx_tx_max_skb_descs(efx))

/* Filters */

/**
 * efx_filter_insert_filter - add or replace a filter
 * @efx: NIC in which to insert the filter
 * @spec: Specification for the filter
 * @replace_equal: Flag for whether the specified filter may replace an
 *	existing filter with equal priority
 *
 * On success, return the filter ID.
 * On failure, return a negative error code.
 *
 * If existing filters have equal match values to the new filter spec,
 * then the new filter might replace them or the function might fail,
 * as follows.
 *
 * 1. If the existing filters have lower priority, or @replace_equal
 *    is set and they have equal priority, replace them.
 *
 * 2. If the existing filters have higher priority, return -%EPERM.
 *
 * 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not
 *    support delivery to multiple recipients, return -%EEXIST.
 *
 * This implies that filters for multiple multicast recipients must
 * all be inserted with the same priority and @replace_equal = %false.
 */
static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
                                           struct efx_filter_spec *spec,
                                           bool replace_equal)
{
        return efx->type->filter_insert(efx, spec, replace_equal);
}
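/* Illustrative use only (the spec-building helpers and priority values
 * are those declared in filter.h; host and port are in network byte
 * order, and a real caller must check every return code):
 *
 *	struct efx_filter_spec spec;
 *	s32 rc;
 *
 *	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, rxq_index);
 *	rc = efx_filter_set_ipv4_local(&spec, IPPROTO_TCP, host, port);
 *	if (rc == 0)
 *		rc = efx_filter_insert_filter(efx, &spec, false);
 *
 * On success rc is the filter ID, suitable for passing back to
 * efx_filter_remove_id_safe() or efx_filter_get_filter_safe().
 */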

/**
 * efx_filter_remove_id_safe - remove a filter by ID, carefully
 * @efx: NIC from which to remove the filter
 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
                                            enum efx_filter_priority priority,
                                            u32 filter_id)
{
        return efx->type->filter_remove_safe(efx, priority, filter_id);
}

/**
 * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
 * @efx: NIC from which to retrieve the filter
 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
 * @spec: Buffer in which to store filter specification
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
static inline int
efx_filter_get_filter_safe(struct efx_nic *efx,
                           enum efx_filter_priority priority,
                           u32 filter_id, struct efx_filter_spec *spec)
{
        return efx->type->filter_get_safe(efx, priority, filter_id, spec);
}

/**
 * efx_filter_clear_rx - remove RX filters by priority
 * @efx: NIC from which to remove the filters
 * @priority: Maximum priority to remove
 */
static inline void efx_filter_clear_rx(struct efx_nic *efx,
                                       enum efx_filter_priority priority)
{
        efx->type->filter_clear_rx(efx, priority);
}

static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
                                           enum efx_filter_priority priority)
{
        return efx->type->filter_count_rx_used(efx, priority);
}
static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
{
        return efx->type->filter_get_rx_id_limit(efx);
}
static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
                                        enum efx_filter_priority priority,
                                        u32 *buf, u32 size)
{
        return efx->type->filter_get_rx_ids(efx, priority, buf, size);
}
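/* Typical enumeration pattern (illustrative only; this roughly mirrors
 * the ethtool rule-listing path, with error handling omitted):
 *
 *	u32 count = efx_filter_count_rx_used(efx, priority);
 *	u32 *ids = kcalloc(count, sizeof(*ids), GFP_KERNEL);
 *	s32 n = efx_filter_get_rx_ids(efx, priority, ids, count);
 *
 * Each returned ID is below efx_filter_get_rx_id_limit(efx) and may be
 * passed to efx_filter_get_filter_safe() to read back its specification.
 */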
#ifdef CONFIG_RFS_ACCEL
extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
                          u16 rxq_index, u32 flow_id);
extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
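/* Age out filters inserted by efx_filter_rfs().  Once this channel has
 * added at least 60 aRFS filters, ask the driver core to examine up to
 * 100 filter-table entries for expired flows; on success, reduce the
 * channel's added-filter count by 60.
 */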
static inline void efx_filter_rfs_expire(struct efx_channel *channel)
{
        if (channel->rfs_filters_added >= 60 &&
            __efx_filter_rfs_expire(channel->efx, 100))
                channel->rfs_filters_added -= 60;
}
#define efx_filter_rfs_enabled() 1
#else
static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
#define efx_filter_rfs_enabled() 0
#endif
extern bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);

/* Channels */
extern int efx_channel_dummy_op_int(struct efx_channel *channel);
extern void efx_channel_dummy_op_void(struct efx_channel *channel);
extern int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);

/* Ports */
extern int efx_reconfigure_port(struct efx_nic *efx);
extern int __efx_reconfigure_port(struct efx_nic *efx);

/* Ethtool support */
extern const struct ethtool_ops efx_ethtool_ops;

/* Reset handling */
extern int efx_reset(struct efx_nic *efx, enum reset_type method);
extern void efx_reset_down(struct efx_nic *efx, enum reset_type method);
extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
extern int efx_try_recovery(struct efx_nic *efx);

/* Global */
extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
extern int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
                                   unsigned int rx_usecs, bool rx_adaptive,
                                   bool rx_may_override_tx);
extern void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
                                   unsigned int *rx_usecs, bool *rx_adaptive);

/* Dummy PHY ops for PHY drivers */
extern int efx_port_dummy_op_int(struct efx_nic *efx);
extern void efx_port_dummy_op_void(struct efx_nic *efx);


/* MTD */
#ifdef CONFIG_SFC_MTD
extern int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
                       size_t n_parts, size_t sizeof_part);
static inline int efx_mtd_probe(struct efx_nic *efx)
{
        return efx->type->mtd_probe(efx);
}
extern void efx_mtd_rename(struct efx_nic *efx);
extern void efx_mtd_remove(struct efx_nic *efx);
#else
static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mtd_rename(struct efx_nic *efx) {}
static inline void efx_mtd_remove(struct efx_nic *efx) {}
#endif

static inline void efx_schedule_channel(struct efx_channel *channel)
{
        netif_vdbg(channel->efx, intr, channel->efx->net_dev,
                   "channel %d scheduling NAPI poll on CPU%d\n",
                   channel->channel, raw_smp_processor_id());

        napi_schedule(&channel->napi_str);
}

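/* As efx_schedule_channel(), but also record the CPU that raised the
 * interrupt so the interrupt/event self-test can verify delivery.
 */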
static inline void efx_schedule_channel_irq(struct efx_channel *channel)
{
        channel->event_test_cpu = raw_smp_processor_id();
        efx_schedule_channel(channel);
}

extern void efx_link_status_changed(struct efx_nic *efx);
extern void efx_link_set_advertising(struct efx_nic *efx, u32);
extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);

static inline void efx_device_detach_sync(struct efx_nic *efx)
{
        struct net_device *dev = efx->net_dev;

        /* Lock/freeze all TX queues so that we can be sure the
         * TX scheduler is stopped when we're done and before
         * netif_device_present() becomes false.
         */
        netif_tx_lock_bh(dev);
        netif_device_detach(dev);
        netif_tx_unlock_bh(dev);
}

#endif /* EFX_EFX_H */