/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include "net_driver.h"
#include "ethtool.h"
#include "tx.h"
#include "rx.h"
#include "efx.h"
#include "mdio_10g.h"
#include "falcon.h"

#define EFX_MAX_MTU (9 * 1024)

/* RX slow fill workqueue. If memory allocation fails in the fast path,
 * a work item is pushed onto this work queue to retry the allocation later,
 * to avoid the NIC being starved of RX buffers. Since this is a per-CPU
 * workqueue, there is nothing to be gained in making it per NIC.
 */
static struct workqueue_struct *refill_workqueue;

/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-NIC work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode.
 */
static unsigned int separate_tx_channels;
module_param(separate_tx_channels, uint, 0644);
MODULE_PARM_DESC(separate_tx_channels,
		 "Use separate channels for TX and RX");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor, which checks for known hardware bugs and resets the
 * hardware and driver as necessary.
 */
unsigned int efx_monitor_interval = 1 * HZ;

/* This controls whether or not the driver will initialise devices
 * with invalid MAC addresses stored in the EEPROM or flash. If true,
 * such devices will be initialised with a random locally-generated
 * MAC address. This allows for loading the sfc_mtd driver to
 * reprogram the flash, even if the flash contents (including the MAC
 * address) have previously been erased.
 */
static unsigned int allow_bad_hwaddr;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each package (level II cache).
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
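
/* Usage sketch (hypothetical values): the module parameters above can be
 * set at load time, e.g. "modprobe sfc rss_cpus=4 separate_tx_channels=1",
 * assuming this driver is built as the "sfc" module.
 */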

static int phy_flash_cfg;
module_param(phy_flash_cfg, int, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned irq_adapt_low_thresh = 10000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 20000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_port(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_channels(struct efx_nic *efx);

#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if (efx->state == STATE_RUNNING)	\
			ASSERT_RTNL();			\
	} while (0)

/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int rx_quota)
{
	struct efx_nic *efx = channel->efx;
	int rx_packets;

	if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
		     !channel->enabled))
		return 0;

	rx_packets = falcon_process_eventq(channel, rx_quota);
	if (rx_packets == 0)
		return 0;

	/* Deliver last RX packet. */
	if (channel->rx_pkt) {
		__efx_rx_packet(channel, channel->rx_pkt,
				channel->rx_pkt_csummed);
		channel->rx_pkt = NULL;
	}

	efx_rx_strategy(channel);

	efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);

	return rx_packets;
}

/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
	/* The interrupt handler for this channel may set work_pending
	 * as soon as we acknowledge the events we've seen. Make sure
	 * it's cleared before then. */
	channel->work_pending = false;
	smp_wmb();

	falcon_eventq_read_ack(channel);
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	int rx_packets;

	EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
		  channel->channel, raw_smp_processor_id());

	rx_packets = efx_process_channel(channel, budget);

	if (rx_packets < budget) {
		struct efx_nic *efx = channel->efx;

		if (channel->used_flags & EFX_USED_BY_RX &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			if (unlikely(channel->irq_mod_score <
				     irq_adapt_low_thresh)) {
				if (channel->irq_moderation > 1) {
					channel->irq_moderation -= 1;
					falcon_set_int_moderation(channel);
				}
			} else if (unlikely(channel->irq_mod_score >
					    irq_adapt_high_thresh)) {
				if (channel->irq_moderation <
				    efx->irq_rx_moderation) {
					channel->irq_moderation += 1;
					falcon_set_int_moderation(channel);
				}
			}
			channel->irq_count = 0;
			channel->irq_mod_score = 0;
		}

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_channel_processed() will have no effect if
		 * interrupts have already been disabled.
		 */
		napi_complete(napi);
		efx_channel_processed(channel);
	}

	return rx_packets;
}

/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack) the eventq.
 * Finally reenable NAPI and interrupts.
 *
 * Since we are touching interrupts the caller should hold the suspend lock.
 */
void efx_process_channel_now(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	BUG_ON(!channel->used_flags);
	BUG_ON(!channel->enabled);

	/* Disable interrupts and wait for ISRs to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	if (channel->irq)
		synchronize_irq(channel->irq);

	/* Wait for any NAPI processing to complete */
	napi_disable(&channel->napi_str);

	/* Poll the channel */
	efx_process_channel(channel, EFX_EVQ_SIZE);

	/* Ack the eventq. This may cause an interrupt to be generated
	 * when they are reenabled */
	efx_channel_processed(channel);

	napi_enable(&channel->napi_str);
	falcon_enable_interrupts(efx);
}

/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);

	return falcon_probe_eventq(channel);
}

/* Prepare channel's event queue */
static void efx_init_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);

	channel->eventq_read_ptr = 0;

	falcon_init_eventq(channel);
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);

	falcon_fini_eventq(channel);
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);

	falcon_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail1;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail2;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail3;
	}

	channel->n_rx_frm_trunc = 0;

	return 0;

 fail3:
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
 fail2:
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
 fail1:
	return rc;
}

static void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;
	const char *type = "";
	int number;

	efx_for_each_channel(channel, efx) {
		number = channel->channel;
		if (efx->n_channels > efx->n_rx_queues) {
			if (channel->channel < efx->n_rx_queues) {
				type = "-rx";
			} else {
				type = "-tx";
				number -= efx->n_rx_queues;
			}
		}
		snprintf(channel->name, sizeof(channel->name),
			 "%s%s-%d", efx->name, type, number);
	}
}

/* Channels are shut down and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions.
 */
static void efx_init_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
			      EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			      efx->type->rx_buffer_padding);
	efx->rx_buffer_order = get_order(efx->rx_buffer_len);

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "init chan %d\n", channel->channel);

		efx_init_eventq(channel);

		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue(tx_queue);

		/* The rx buffer allocation strategy is MTU dependent */
		efx_rx_strategy(channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_init_rx_queue(rx_queue);

		WARN_ON(channel->rx_pkt != NULL);
		efx_rx_strategy(channel);
	}
}

/* This enables event queue processing and packet transmission.
 *
 * Note that this function is not allowed to fail, since that would
 * introduce too much complexity into the suspend/resume path.
 */
static void efx_start_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);

	/* The interrupt handler for this channel may set work_pending
	 * as soon as we enable it. Make sure it's cleared before
	 * then. Similarly, make sure it sees the enabled flag set. */
	channel->work_pending = false;
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);

	/* Load up RX descriptors */
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_fast_push_rx_descriptors(rx_queue);
}

/* This disables event queue processing and packet transmission.
 * This function does not guarantee that all queue processing
 * (e.g. RX refill) is complete.
 */
static void efx_stop_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	if (!channel->enabled)
		return;

	EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);

	channel->enabled = false;
	napi_disable(&channel->napi_str);

	/* Ensure that any worker threads have exited or will be no-ops */
	efx_for_each_channel_rx_queue(rx_queue, channel) {
		spin_lock_bh(&rx_queue->add_lock);
		spin_unlock_bh(&rx_queue->add_lock);
	}
}

static void efx_fini_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	rc = falcon_flush_queues(efx);
	if (rc)
		EFX_ERR(efx, "failed to flush queues\n");
	else
		EFX_LOG(efx, "successfully flushed all queues\n");

	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
		efx_fini_eventq(channel);
	}
}

static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);

	channel->used_flags = 0;
}
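
/* Called from the RX fast path when buffer allocation fails: the delayed
 * work item runs later on the module-global refill_workqueue (see the
 * comment at the top of this file) and retries pushing RX descriptors.
 */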
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
{
	queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
}

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and hence that the
 * networking core stops the port's TX queue while the link is down.
 */
static void efx_link_status_changed(struct efx_nic *efx)
{
	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (efx->port_inhibited) {
		netif_carrier_off(efx->net_dev);
		return;
	}

	if (efx->link_up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (efx->link_up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (efx->link_up) {
		EFX_INFO(efx, "link up at %uMbps %s-duplex (MTU %d)%s\n",
			 efx->link_speed, efx->link_fd ? "full" : "half",
			 efx->net_dev->mtu,
			 (efx->promiscuous ? " [PROMISC]" : ""));
	} else {
		EFX_INFO(efx, "link down\n");
	}
}

static void efx_fini_port(struct efx_nic *efx);

/* This call reinitialises the MAC to pick up new PHY settings. The
 * caller must hold the mac_lock */
void __efx_reconfigure_port(struct efx_nic *efx)
{
	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
		raw_smp_processor_id());

	/* Serialise the promiscuous flag with efx_set_multicast_list. */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}

	falcon_deconfigure_mac_wrapper(efx);

	/* Reconfigure the PHY, disabling transmit in mac level loopback. */
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
	efx->phy_op->reconfigure(efx);

	if (falcon_switch_mac(efx))
		goto fail;

	efx->mac_op->reconfigure(efx);

	/* Inform kernel of loss/gain of carrier */
	efx_link_status_changed(efx);
	return;

fail:
	EFX_ERR(efx, "failed to reconfigure MAC\n");
	efx->port_enabled = false;
	efx_fini_port(efx);
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
void efx_reconfigure_port(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}

/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
 * we don't call efx_reconfigure_port() if the port is disabled. Care is taken
 * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
static void efx_phy_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, phy_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}

static void efx_mac_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		efx->mac_op->irq(efx);
	mutex_unlock(&efx->mac_lock);
}

static int efx_probe_port(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "create port\n");

	/* Connect up MAC/PHY operations table and read MAC address */
	rc = falcon_probe_port(efx);
	if (rc)
		goto err;

	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	/* Sanity check MAC address */
	if (is_valid_ether_addr(efx->mac_address)) {
		memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
	} else {
		EFX_ERR(efx, "invalid MAC address %pM\n",
			efx->mac_address);
		if (!allow_bad_hwaddr) {
			rc = -EINVAL;
			goto err;
		}
		random_ether_addr(efx->net_dev->dev_addr);
		EFX_INFO(efx, "using locally-generated MAC %pM\n",
			 efx->net_dev->dev_addr);
	}

	return 0;

 err:
	efx_remove_port(efx);
	return rc;
}

static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "init port\n");

	rc = efx->phy_op->init(efx);
	if (rc)
		return rc;
	mutex_lock(&efx->mac_lock);
	efx->phy_op->reconfigure(efx);
	rc = falcon_switch_mac(efx);
	mutex_unlock(&efx->mac_lock);
	if (rc)
		goto fail;
	efx->mac_op->reconfigure(efx);

	efx->port_initialized = true;
	efx_stats_enable(efx);
	return 0;

fail:
	efx->phy_op->fini(efx);
	return rc;
}

/* Allow efx_reconfigure_port() to be scheduled, and close the window
 * between efx_stop_port and efx_flush_all whereby a previously scheduled
 * efx_phy_work()/efx_mac_work() may have been cancelled */
static void efx_start_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;
	__efx_reconfigure_port(efx);
	efx->mac_op->irq(efx);
	mutex_unlock(&efx->mac_lock);
}

/* Prevent efx_phy_work, efx_mac_work, and efx_monitor() from executing,
 * and efx_set_multicast_list() from scheduling efx_phy_work. efx_phy_work
 * and efx_mac_work may still be scheduled via NAPI processing until
 * efx_flush_all() is called */
static void efx_stop_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "stop port\n");

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}
}

static void efx_fini_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "shut down port\n");

	if (!efx->port_initialized)
		return;

	efx_stats_disable(efx);
	efx->phy_op->fini(efx);
	efx->port_initialized = false;

	efx->link_up = false;
	efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying port\n");

	falcon_remove_port(efx);
}

/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	int rc;

	EFX_LOG(efx, "initialising I/O\n");

	rc = pci_enable_device(pci_dev);
	if (rc) {
		EFX_ERR(efx, "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask. Try all possibilities from our
	 * genuine mask down to 32 bits, because some architectures
	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		if (pci_dma_supported(pci_dev, dma_mask) &&
		    ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
			break;
		dma_mask >>= 1;
	}
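	/* Example: a 46-bit mask (as mentioned above) that is rejected is
	 * halved to 45 bits, then 44, and so on, down to the 32-bit floor
	 * enforced by the loop condition.
	 */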
	if (rc) {
		EFX_ERR(efx, "could not find a suitable DMA mask\n");
		goto fail2;
	}
	EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);
	rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
	if (rc) {
		/* pci_set_consistent_dma_mask() is not *allowed* to
		 * fail with a mask that pci_set_dma_mask() accepted,
		 * but just in case...
		 */
		EFX_ERR(efx, "failed to set consistent DMA mask\n");
		goto fail2;
	}

	efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
	rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
	if (rc) {
		EFX_ERR(efx, "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys,
				       efx->type->mem_map_size);
	if (!efx->membase) {
		EFX_ERR(efx, "could not map memory BAR at %llx+%x\n",
			(unsigned long long)efx->membase_phys,
			efx->type->mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	EFX_LOG(efx, "memory BAR at %llx+%x (virtual %p)\n",
		(unsigned long long)efx->membase_phys,
		efx->type->mem_map_size, efx->membase);

	return 0;

 fail4:
	pci_release_region(efx->pci_dev, EFX_MEM_BAR);
 fail3:
	efx->membase_phys = 0;
 fail2:
	pci_disable_device(efx->pci_dev);
 fail1:
	return rc;
}

static void efx_fini_io(struct efx_nic *efx)
{
	EFX_LOG(efx, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		pci_release_region(efx->pci_dev, EFX_MEM_BAR);
		efx->membase_phys = 0;
	}

	pci_disable_device(efx->pci_dev);
}

/* Get number of RX queues wanted. Return number of online CPU
 * packages in the expectation that an IRQ balancer will spread
 * interrupts across them. */
static int efx_wanted_rx_queues(void)
{
	cpumask_var_t core_mask;
	int count;
	int cpu;

	if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
		printk(KERN_WARNING
		       "sfc: RSS disabled due to allocation failure\n");
		return 1;
	}

	count = 0;
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, core_mask)) {
			++count;
			cpumask_or(core_mask, core_mask,
				   topology_core_cpumask(cpu));
		}
	}
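
	/* Example (hypothetical topology): with two packages of four cores
	 * each, every core in a package shares topology_core_cpumask(), so
	 * the loop above counts each package exactly once and count is 2.
	 */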
	free_cpumask_var(core_mask);
	return count;
}

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static void efx_probe_interrupts(struct efx_nic *efx)
{
	int max_channels =
		min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
	int rc, i;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		int wanted_ints;
		int rx_queues;

		/* We want one RX queue and interrupt per CPU package
		 * (or as specified by the rss_cpus module parameter).
		 * We will need one channel per interrupt.
		 */
		rx_queues = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
		wanted_ints = rx_queues + (separate_tx_channels ? 1 : 0);
		wanted_ints = min(wanted_ints, max_channels);
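		/* Example (hypothetical values): with rss_cpus=4 and
		 * separate_tx_channels=1 this requests 4 + 1 = 5 MSI-X
		 * vectors, subject to the max_channels cap above.
		 */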

		for (i = 0; i < wanted_ints; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix(efx->pci_dev, xentries, wanted_ints);
		if (rc > 0) {
			EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors"
				" available (%d < %d).\n", rc, wanted_ints);
			EFX_ERR(efx, "WARNING: Performance may be reduced.\n");
			EFX_BUG_ON_PARANOID(rc >= wanted_ints);
			wanted_ints = rc;
			rc = pci_enable_msix(efx->pci_dev, xentries,
					     wanted_ints);
		}

		if (rc == 0) {
			efx->n_rx_queues = min(rx_queues, wanted_ints);
			efx->n_channels = wanted_ints;
			for (i = 0; i < wanted_ints; i++)
				efx->channel[i].irq = xentries[i].vector;
		} else {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EFX_INT_MODE_MSI;
			EFX_ERR(efx, "could not enable MSI-X\n");
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_rx_queues = 1;
		efx->n_channels = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx->channel[0].irq = efx->pci_dev->irq;
		} else {
			EFX_ERR(efx, "could not enable MSI\n");
			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_rx_queues = 1;
		efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
		efx->legacy_irq = efx->pci_dev->irq;
	}
}

static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

static void efx_set_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	efx_for_each_tx_queue(tx_queue, efx) {
		if (separate_tx_channels)
			tx_queue->channel = &efx->channel[efx->n_channels-1];
		else
			tx_queue->channel = &efx->channel[0];
		tx_queue->channel->used_flags |= EFX_USED_BY_TX;
	}

	efx_for_each_rx_queue(rx_queue, efx) {
		rx_queue->channel = &efx->channel[rx_queue->queue];
		rx_queue->channel->used_flags |= EFX_USED_BY_RX;
	}
}

static int efx_probe_nic(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = falcon_probe_nic(efx);
	if (rc)
		return rc;

	/* Determine the number of channels and RX queues by trying to hook
	 * in MSI-X interrupts. */
	efx_probe_interrupts(efx);

	efx_set_channels(efx);

	/* Initialise the interrupt moderation settings */
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);

	return 0;
}

static void efx_remove_nic(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying NIC\n");

	efx_remove_interrupts(efx);
	falcon_remove_nic(efx);
}

/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

static int efx_probe_all(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Create NIC */
	rc = efx_probe_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create NIC\n");
		goto fail1;
	}

	/* Create port */
	rc = efx_probe_port(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create port\n");
		goto fail2;
	}

	/* Create channels */
	efx_for_each_channel(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			EFX_ERR(efx, "failed to create channel %d\n",
				channel->channel);
			goto fail3;
		}
	}
	efx_set_channel_names(efx);

	return 0;

 fail3:
	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
	efx_remove_port(efx);
 fail2:
	efx_remove_nic(efx);
 fail1:
	return rc;
}

/* Called after previous invocation(s) of efx_stop_all, restarts the
 * port, kernel transmit queue, NAPI processing and hardware interrupts,
 * and ensures that the port is scheduled to be reconfigured.
 * This function is safe to call multiple times when the NIC is in any
 * state. */
static void efx_start_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled)
		return;
	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
		return;
	if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
		return;

	/* Mark the port as enabled so port reconfigurations can start, then
	 * restart the transmit interface early so the watchdog timer stops */
	efx_start_port(efx);
	if (efx_dev_registered(efx))
		efx_wake_queue(efx);

	efx_for_each_channel(channel, efx)
		efx_start_channel(channel);

	falcon_enable_interrupts(efx);

	/* Start hardware monitor if we're in RUNNING */
	if (efx->state == STATE_RUNNING)
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
}

/* Flush all delayed work. Should only be called when no more delayed work
 * will be scheduled. This doesn't flush pending online resets (efx_reset),
 * since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
	struct efx_rx_queue *rx_queue;

	/* Make sure the hardware monitor is stopped */
	cancel_delayed_work_sync(&efx->monitor_work);

	/* Ensure that all RX slow refills are complete. */
	efx_for_each_rx_queue(rx_queue, efx)
		cancel_delayed_work_sync(&rx_queue->work);

	/* Stop scheduled port reconfigurations */
	cancel_work_sync(&efx->mac_work);
	cancel_work_sync(&efx->phy_work);
}

/* Quiesce hardware and software without bringing the link down.
 * Safe to call multiple times, when the NIC and interface are in any
 * state. The caller is guaranteed to subsequently be in a position
 * to modify any hardware and software state they see fit without
 * taking locks. */
static void efx_stop_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	/* Disable interrupts and wait for ISR to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);
	}

	/* Stop all NAPI processing and synchronous rx refills */
	efx_for_each_channel(channel, efx)
		efx_stop_channel(channel);

	/* Stop all asynchronous port reconfigurations. Since all
	 * event processing has already been stopped, there is no
	 * window to lose PHY events */
	efx_stop_port(efx);

	/* Flush efx_phy_work, efx_mac_work, refill_workqueue, monitor_work */
	efx_flush_all(efx);

	/* Isolate the MAC from the TX and RX engines, so that queue
	 * flushes will complete in a timely fashion. */
	falcon_deconfigure_mac_wrapper(efx);
	msleep(10); /* Let the Rx FIFO drain */
	falcon_drain_tx_fifo(efx);

	/* Stop the kernel transmit interface late, so the watchdog
	 * timer isn't ticking over the flush */
	if (efx_dev_registered(efx)) {
		efx_stop_queue(efx);
		netif_tx_lock_bh(efx->net_dev);
		netif_tx_unlock_bh(efx->net_dev);
	}
}

static void efx_remove_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
	efx_remove_port(efx);
	efx_remove_nic(efx);
}

/* A convenience function to safely flush all the queues */
void efx_flush_queues(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_stop_all(efx);

	efx_fini_channels(efx);
	efx_init_channels(efx);

	efx_start_all(efx);
}

/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/

static unsigned irq_mod_ticks(int usecs, int resolution)
{
	if (usecs <= 0)
		return 0; /* cannot receive interrupts ahead of time :-) */
	if (usecs < resolution)
		return 1; /* never round down to 0 */
	return usecs / resolution;
}
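
/* Worked example: assuming FALCON_IRQ_MOD_RESOLUTION is 5 us (a hypothetical
 * but representative value), the default rx_irq_mod_usec of 60 maps to
 * 60 / 5 = 12 ticks, and any non-zero request below 5 us rounds up to 1.
 */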

/* Set interrupt moderation parameters */
void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
			     bool rx_adaptive)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	unsigned tx_ticks = irq_mod_ticks(tx_usecs, FALCON_IRQ_MOD_RESOLUTION);
	unsigned rx_ticks = irq_mod_ticks(rx_usecs, FALCON_IRQ_MOD_RESOLUTION);

	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_for_each_tx_queue(tx_queue, efx)
		tx_queue->channel->irq_moderation = tx_ticks;

	efx->irq_rx_adaptive = rx_adaptive;
	efx->irq_rx_moderation = rx_ticks;
	efx_for_each_rx_queue(rx_queue, efx)
		rx_queue->channel->irq_moderation = rx_ticks;
}

/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/

/* Run periodically off the general workqueue. Serialised against
 * efx_reconfigure_port via the mac_lock */
static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);
	int rc;

	EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
		  raw_smp_processor_id());

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in place, which will likely do
	 * most of the work of check_hw() anyway. */
	if (!mutex_trylock(&efx->mac_lock))
		goto out_requeue;
	if (!efx->port_enabled)
		goto out_unlock;
	rc = efx->board_info.monitor(efx);
	if (rc) {
		EFX_ERR(efx, "Board sensor %s; shutting down PHY\n",
			(rc == -ERANGE) ? "reported fault" : "failed");
		efx->phy_mode |= PHY_MODE_LOW_POWER;
		falcon_sim_phy_event(efx);
	}
	efx->phy_op->poll(efx);
	efx->mac_op->poll(efx);

out_unlock:
	mutex_unlock(&efx->mac_lock);
out_requeue:
	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   efx_monitor_interval);
}

/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Convert phy_id from older PRTAD/DEVAD format */
	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
	    (data->phy_id & 0xfc00) == 0x0400)
		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;

	return mdio_mii_ioctl(&efx->mdio, data, cmd);
}

/**************************************************************************
 *
 * NAPI interface
 *
 **************************************************************************/

static int efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		channel->napi_dev = efx->net_dev;
		netif_napi_add(channel->napi_dev, &channel->napi_str,
			       efx_poll, napi_weight);
	}
	return 0;
}

static void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		if (channel->napi_dev)
			netif_napi_del(&channel->napi_str);
		channel->napi_dev = NULL;
	}
}

/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

#endif

/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	EFX_ASSERT_RESET_SERIALISED(efx);

	EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());

	if (efx->state == STATE_DISABLED)
		return -EIO;
	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;

	efx_start_all(efx);
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int efx_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());

	if (efx->state != STATE_DISABLED) {
		/* Stop the device and flush all the channels */
		efx_stop_all(efx);
		efx_fini_channels(efx);
		efx_init_channels(efx);
	}

	return 0;
}
1405
Ben Hutchings1974cc22009-01-29 18:00:07 +00001406void efx_stats_disable(struct efx_nic *efx)
1407{
1408 spin_lock(&efx->stats_lock);
1409 ++efx->stats_disable_count;
1410 spin_unlock(&efx->stats_lock);
1411}
1412
1413void efx_stats_enable(struct efx_nic *efx)
1414{
1415 spin_lock(&efx->stats_lock);
1416 --efx->stats_disable_count;
1417 spin_unlock(&efx->stats_lock);
1418}
1419
Ben Hutchings5b9e2072008-05-16 21:18:14 +01001420/* Context: process, dev_base_lock or RTNL held, non-blocking. */
Ben Hutchings8ceee662008-04-27 12:55:59 +01001421static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1422{
Ben Hutchings767e4682008-09-01 12:43:14 +01001423 struct efx_nic *efx = netdev_priv(net_dev);
Ben Hutchings8ceee662008-04-27 12:55:59 +01001424 struct efx_mac_stats *mac_stats = &efx->mac_stats;
1425 struct net_device_stats *stats = &net_dev->stats;
1426
Ben Hutchings5b9e2072008-05-16 21:18:14 +01001427 /* Update stats if possible, but do not wait if another thread
Ben Hutchings1974cc22009-01-29 18:00:07 +00001428 * is updating them or if MAC stats fetches are temporarily
1429 * disabled; slightly stale stats are acceptable.
Ben Hutchings5b9e2072008-05-16 21:18:14 +01001430 */
Ben Hutchings8ceee662008-04-27 12:55:59 +01001431 if (!spin_trylock(&efx->stats_lock))
1432 return stats;
Ben Hutchings1974cc22009-01-29 18:00:07 +00001433 if (!efx->stats_disable_count) {
Ben Hutchings177dfcd2008-12-12 21:50:08 -08001434 efx->mac_op->update_stats(efx);
Ben Hutchings8ceee662008-04-27 12:55:59 +01001435 falcon_update_nic_stats(efx);
1436 }
1437 spin_unlock(&efx->stats_lock);
1438
1439 stats->rx_packets = mac_stats->rx_packets;
1440 stats->tx_packets = mac_stats->tx_packets;
1441 stats->rx_bytes = mac_stats->rx_bytes;
1442 stats->tx_bytes = mac_stats->tx_bytes;
1443 stats->multicast = mac_stats->rx_multicast;
1444 stats->collisions = mac_stats->tx_collision;
1445 stats->rx_length_errors = (mac_stats->rx_gtjumbo +
1446 mac_stats->rx_length_error);
1447 stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
1448 stats->rx_crc_errors = mac_stats->rx_bad;
1449 stats->rx_frame_errors = mac_stats->rx_align_error;
1450 stats->rx_fifo_errors = mac_stats->rx_overflow;
1451 stats->rx_missed_errors = mac_stats->rx_missed;
1452 stats->tx_window_errors = mac_stats->tx_late_collision;
1453
1454 stats->rx_errors = (stats->rx_length_errors +
1455 stats->rx_over_errors +
1456 stats->rx_crc_errors +
1457 stats->rx_frame_errors +
1458 stats->rx_fifo_errors +
1459 stats->rx_missed_errors +
1460 mac_stats->rx_symbol_error);
1461 stats->tx_errors = (stats->tx_window_errors +
1462 mac_stats->tx_bad);
1463
1464 return stats;
1465}
1466
1467/* Context: netif_tx_lock held, BHs disabled. */
1468static void efx_watchdog(struct net_device *net_dev)
1469{
Ben Hutchings767e4682008-09-01 12:43:14 +01001470 struct efx_nic *efx = netdev_priv(net_dev);
Ben Hutchings8ceee662008-04-27 12:55:59 +01001471
Ben Hutchings739bb23d2008-11-04 20:35:36 +00001472 EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d:"
1473 " resetting channels\n",
1474 atomic_read(&efx->netif_stop_count), efx->port_enabled);
Ben Hutchings8ceee662008-04-27 12:55:59 +01001475
Ben Hutchings739bb23d2008-11-04 20:35:36 +00001476 efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
Ben Hutchings8ceee662008-04-27 12:55:59 +01001477}
1478
1479
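/* Changing the MTU requires a full channel teardown and re-init, since
 * the RX buffer size is derived from the device MTU. */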
/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc = 0;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (new_mtu > EFX_MAX_MTU)
		return -EINVAL;

	efx_stop_all(efx);

	EFX_LOG(efx, "changing MTU to %d\n", new_mtu);

	efx_fini_channels(efx);
	net_dev->mtu = new_mtu;
	efx_init_channels(efx);

	efx_start_all(efx);
	return rc;
}

static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct sockaddr *addr = data;
	char *new_addr = addr->sa_data;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (!is_valid_ether_addr(new_addr)) {
		EFX_ERR(efx, "invalid ethernet MAC address requested: %pM\n",
			new_addr);
		return -EINVAL;
	}

	memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);

	/* Reconfigure the MAC */
	efx_reconfigure_port(efx);

	return 0;
}

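/* The hardware multicast filter is a hash table of
 * EFX_MCAST_HASH_ENTRIES bits: each address sets the bit indexed by
 * the low-order bits of its little-endian CRC32. */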
/* Context: netif_addr_lock held, BHs disabled. */
static void efx_set_multicast_list(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct dev_mc_list *mc_list = net_dev->mc_list;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	bool promiscuous = !!(net_dev->flags & IFF_PROMISC);
	bool changed = (efx->promiscuous != promiscuous);
	u32 crc;
	int bit;
	int i;

	efx->promiscuous = promiscuous;

	/* Build multicast hash table */
	if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		for (i = 0; i < net_dev->mc_count; i++) {
			crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			set_bit_le(bit, mc_hash->byte);
			mc_list = mc_list->next;
		}
	}

	if (!efx->port_enabled)
		/* Delay pushing settings until efx_start_port() */
		return;

	if (changed)
		queue_work(efx->workqueue, &efx->phy_work);

	/* Create and activate new global multicast hash table */
	falcon_set_multicast_hash(efx);
}

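/* Hook the methods above into the kernel's generic net device layer. */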
static const struct net_device_ops efx_netdev_ops = {
	.ndo_open		= efx_net_open,
	.ndo_stop		= efx_net_stop,
	.ndo_get_stats		= efx_net_stats,
	.ndo_tx_timeout		= efx_watchdog,
	.ndo_start_xmit		= efx_hard_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= efx_ioctl,
	.ndo_change_mtu		= efx_change_mtu,
	.ndo_set_mac_address	= efx_set_mac_address,
	.ndo_set_multicast_list = efx_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = efx_netpoll,
#endif
};

static void efx_update_name(struct efx_nic *efx)
{
	strcpy(efx->name, efx->net_dev->name);
	efx_mtd_rename(efx);
	efx_set_channel_names(efx);
}

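/* Watch for interface renames so that efx->name, the MTD partition
 * names and the channel names stay in sync with the net device. */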
static int efx_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = ptr;

	if (net_dev->netdev_ops == &efx_netdev_ops &&
	    event == NETDEV_CHANGENAME)
		efx_update_name(netdev_priv(net_dev));

	return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
	.notifier_call = efx_netdev_event,
};

static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
	return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);

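/* Registration runs under the RTNL lock so that the name allocation,
 * efx_update_name() and register_netdevice() are atomic with respect
 * to userspace renames. */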
static int efx_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &efx_netdev_ops;
	SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);

	/* Clear MAC statistics */
	efx->mac_op->update_stats(efx);
	memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));

	rtnl_lock();

	rc = dev_alloc_name(net_dev, net_dev->name);
	if (rc < 0)
		goto fail_locked;
	efx_update_name(efx);

	rc = register_netdevice(net_dev);
	if (rc)
		goto fail_locked;

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(efx->net_dev);

	rtnl_unlock();

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
	if (rc) {
		EFX_ERR(efx, "failed to init net dev attributes\n");
		goto fail_registered;
	}

	return 0;

fail_locked:
	rtnl_unlock();
	EFX_ERR(efx, "could not register net dev\n");
	return rc;

fail_registered:
	unregister_netdev(net_dev);
	return rc;
}

static void efx_unregister_netdev(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;

	if (!efx->net_dev)
		return;

	BUG_ON(netdev_priv(efx->net_dev) != efx);

	/* Free up any skbs still remaining. This must happen before we
	 * try to unregister the netdev, as running the skbs' destructors
	 * may be needed to get the device ref. count to 0. */
	efx_for_each_tx_queue(tx_queue, efx)
		efx_release_tx_buffers(tx_queue);

	if (efx_dev_registered(efx)) {
		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
		device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
		unregister_netdev(efx->net_dev);
	}
}

/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/

/* Tears down the entire software state and most of the hardware state
 * before reset. */
void efx_reset_down(struct efx_nic *efx, enum reset_type method,
		    struct ethtool_cmd *ecmd)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_stats_disable(efx);
	efx_stop_all(efx);
	mutex_lock(&efx->mac_lock);
	mutex_lock(&efx->spi_lock);

	efx->phy_op->get_settings(efx, ecmd);

	efx_fini_channels(efx);
	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
		efx->phy_op->fini(efx);
}

/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE. */
int efx_reset_up(struct efx_nic *efx, enum reset_type method,
		 struct ethtool_cmd *ecmd, bool ok)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	rc = falcon_init_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise NIC\n");
		ok = false;
	}

	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
		if (ok) {
			rc = efx->phy_op->init(efx);
			if (rc)
				ok = false;
		}
		if (!ok)
			efx->port_initialized = false;
	}

	if (ok) {
		efx_init_channels(efx);

		if (efx->phy_op->set_settings(efx, ecmd))
			EFX_ERR(efx, "could not restore PHY settings\n");
	}

	mutex_unlock(&efx->spi_lock);
	mutex_unlock(&efx->mac_lock);

	if (ok) {
		efx_start_all(efx);
		efx_stats_enable(efx);
	}
	return rc;
}

/* Reset the NIC as transparently as possible. Do not reset the PHY.
 * Note that the reset may fail, in which case the card will be left
 * in a most-probably-unusable state.
 *
 * This function will sleep. You cannot reset from within an atomic
 * state; use efx_schedule_reset() instead.
 *
 * Grabs the rtnl_lock.
 */
static int efx_reset(struct efx_nic *efx)
{
	struct ethtool_cmd ecmd;
	enum reset_type method = efx->reset_pending;
	int rc = 0;

	/* Serialise with kernel interfaces */
	rtnl_lock();

	/* If we're not RUNNING then don't reset. Leave the reset_pending
	 * flag set so that efx_pci_probe_main will be retried */
	if (efx->state != STATE_RUNNING) {
		EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
		goto out_unlock;
	}

	EFX_INFO(efx, "resetting (%d)\n", method);

	efx_reset_down(efx, method, &ecmd);

	rc = falcon_reset_hw(efx, method);
	if (rc) {
		EFX_ERR(efx, "failed to reset hardware\n");
		goto out_disable;
	}

	/* Allow resets to be rescheduled. */
	efx->reset_pending = RESET_TYPE_NONE;

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
	 * can respond to requests. */
	pci_set_master(efx->pci_dev);

	/* Leave device stopped if necessary */
	if (method == RESET_TYPE_DISABLE) {
		efx_reset_up(efx, method, &ecmd, false);
		rc = -EIO;
	} else {
		rc = efx_reset_up(efx, method, &ecmd, true);
	}

out_disable:
	if (rc) {
		EFX_ERR(efx, "has been disabled\n");
		efx->state = STATE_DISABLED;
		dev_close(efx->net_dev);
	} else {
		EFX_LOG(efx, "reset complete\n");
	}

out_unlock:
	rtnl_unlock();
	return rc;
}

/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
	struct efx_nic *nic = container_of(data, struct efx_nic, reset_work);

	efx_reset(nic);
}

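/* Recoverable RX/TX errors are downgraded to an invisible reset so the
 * datapath can be restarted without taking the port down; unrecognised
 * types are escalated to a full reset. */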
void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
	enum reset_type method;

	if (efx->reset_pending != RESET_TYPE_NONE) {
		EFX_INFO(efx, "quenching already scheduled reset\n");
		return;
	}

	switch (type) {
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
		method = type;
		break;
	case RESET_TYPE_RX_RECOVERY:
	case RESET_TYPE_RX_DESC_FETCH:
	case RESET_TYPE_TX_DESC_FETCH:
	case RESET_TYPE_TX_SKIP:
		method = RESET_TYPE_INVISIBLE;
		break;
	default:
		method = RESET_TYPE_ALL;
		break;
	}

	if (method != type)
		EFX_LOG(efx, "scheduling reset (%d:%d)\n", type, method);
	else
		EFX_LOG(efx, "scheduling reset (%d)\n", method);

	efx->reset_pending = method;

	queue_work(reset_workqueue, &efx->reset_work);
}

/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static struct pci_device_id efx_pci_table[] __devinitdata = {
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
	 .driver_data = (unsigned long) &falcon_a_nic_type},
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
	 .driver_data = (unsigned long) &falcon_b_nic_type},
	{0}			/* end of list */
};

/**************************************************************************
 *
 * Dummy PHY/MAC/Board operations
 *
 * Can be used for some unimplemented operations.
 * Needed so that all function pointers are valid and do not have to be
 * tested before use.
 *
 **************************************************************************/
int efx_port_dummy_op_int(struct efx_nic *efx)
{
	return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}
void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink) {}

static struct efx_mac_operations efx_dummy_mac_operations = {
	.reconfigure	= efx_port_dummy_op_void,
	.poll		= efx_port_dummy_op_void,
	.irq		= efx_port_dummy_op_void,
};

static struct efx_phy_operations efx_dummy_phy_operations = {
	.init		 = efx_port_dummy_op_int,
	.reconfigure	 = efx_port_dummy_op_void,
	.poll		 = efx_port_dummy_op_void,
	.fini		 = efx_port_dummy_op_void,
	.clear_interrupt = efx_port_dummy_op_void,
};

static struct efx_board efx_dummy_board_info = {
	.init		= efx_port_dummy_op_int,
	.init_leds	= efx_port_dummy_op_void,
	.set_id_led	= efx_port_dummy_op_blink,
	.monitor	= efx_port_dummy_op_int,
	.blink		= efx_port_dummy_op_blink,
	.fini		= efx_port_dummy_op_void,
};

/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/

/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 */
static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
			   struct pci_dev *pci_dev, struct net_device *net_dev)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int i;

	/* Initialise common structures */
	memset(efx, 0, sizeof(*efx));
	spin_lock_init(&efx->biu_lock);
	spin_lock_init(&efx->phy_lock);
	mutex_init(&efx->spi_lock);
	INIT_WORK(&efx->reset_work, efx_reset_work);
	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
	efx->pci_dev = pci_dev;
	efx->state = STATE_INIT;
	efx->reset_pending = RESET_TYPE_NONE;
	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
	efx->board_info = efx_dummy_board_info;

	efx->net_dev = net_dev;
	efx->rx_checksum_enabled = true;
	spin_lock_init(&efx->netif_stop_lock);
	spin_lock_init(&efx->stats_lock);
	efx->stats_disable_count = 1;
	mutex_init(&efx->mac_lock);
	efx->mac_op = &efx_dummy_mac_operations;
	efx->phy_op = &efx_dummy_phy_operations;
	efx->mdio.dev = net_dev;
	INIT_WORK(&efx->phy_work, efx_phy_work);
	INIT_WORK(&efx->mac_work, efx_mac_work);
	atomic_set(&efx->netif_stop_count, 1);

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		channel = &efx->channel[i];
		channel->efx = efx;
		channel->channel = i;
		channel->work_pending = false;
	}
	for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
		tx_queue = &efx->tx_queue[i];
		tx_queue->efx = efx;
		tx_queue->queue = i;
		tx_queue->buffer = NULL;
		tx_queue->channel = &efx->channel[0]; /* for safety */
		tx_queue->tso_headers_free = NULL;
	}
	for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
		rx_queue = &efx->rx_queue[i];
		rx_queue->efx = efx;
		rx_queue->queue = i;
		rx_queue->channel = &efx->channel[0]; /* for safety */
		rx_queue->buffer = NULL;
		spin_lock_init(&rx_queue->add_lock);
		INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
	}

	efx->type = type;

	/* As close as we can get to guaranteeing that we don't overflow */
	BUILD_BUG_ON(EFX_EVQ_SIZE < EFX_TXQ_SIZE + EFX_RXQ_SIZE);

	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);

	/* Would be good to use the net_dev name, but we're too early */
	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
		 pci_name(pci_dev));
	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
	if (!efx->workqueue)
		return -ENOMEM;

	return 0;
}

static void efx_fini_struct(struct efx_nic *efx)
{
	if (efx->workqueue) {
		destroy_workqueue(efx->workqueue);
		efx->workqueue = NULL;
	}
}

/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/

/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Skip everything if we never obtained a valid membase */
	if (!efx->membase)
		return;

	falcon_fini_interrupt(efx);
	efx_fini_channels(efx);
	efx_fini_port(efx);

	/* Shutdown the board, then the NIC and board state */
	efx->board_info.fini(efx);

	efx_fini_napi(efx);
	efx_remove_all(efx);
}

/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove(struct pci_dev *pci_dev)
{
	struct efx_nic *efx;

	efx = pci_get_drvdata(pci_dev);
	if (!efx)
		return;

	/* Mark the NIC as fini, then stop the interface */
	rtnl_lock();
	efx->state = STATE_FINI;
	dev_close(efx->net_dev);

	/* Allow any queued efx_resets() to complete */
	rtnl_unlock();

	if (efx->membase == NULL)
		goto out;

	efx_unregister_netdev(efx);

	efx_mtd_remove(efx);

	/* Wait for any scheduled resets to complete. No more will be
	 * scheduled from this point because efx_stop_all() has been
	 * called, we are no longer registered with driverlink, and
	 * the net devices have been removed. */
	cancel_work_sync(&efx->reset_work);

	efx_pci_remove_main(efx);

out:
	efx_fini_io(efx);
	EFX_LOG(efx, "shutdown successful\n");

	pci_set_drvdata(pci_dev, NULL);
	efx_fini_struct(efx);
	free_netdev(efx->net_dev);
}

/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
	int rc;

	/* Do start-of-day initialisation */
	rc = efx_probe_all(efx);
	if (rc)
		goto fail1;

	rc = efx_init_napi(efx);
	if (rc)
		goto fail2;

	/* Initialise the board */
	rc = efx->board_info.init(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise board\n");
		goto fail3;
	}

	rc = falcon_init_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise NIC\n");
		goto fail4;
	}

	rc = efx_init_port(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise port\n");
		goto fail5;
	}

	efx_init_channels(efx);

	rc = falcon_init_interrupt(efx);
	if (rc)
		goto fail6;

	return 0;

 fail6:
	efx_fini_channels(efx);
	efx_fini_port(efx);
 fail5:
 fail4:
	efx->board_info.fini(efx);
 fail3:
	efx_fini_napi(efx);
 fail2:
	efx_remove_all(efx);
 fail1:
	return rc;
}

/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically). It sets up PCI mappings, tests and resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine. It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
				   const struct pci_device_id *entry)
{
	struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
	struct net_device *net_dev;
	struct efx_nic *efx;
	int i, rc;

	/* Allocate and initialise a struct net_device and struct efx_nic */
	net_dev = alloc_etherdev(sizeof(*efx));
	if (!net_dev)
		return -ENOMEM;
	net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
			      NETIF_F_HIGHDMA | NETIF_F_TSO |
			      NETIF_F_GRO);
	/* Mask for features that also apply to VLAN devices */
	net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
				   NETIF_F_HIGHDMA | NETIF_F_TSO);
	efx = netdev_priv(net_dev);
	pci_set_drvdata(pci_dev, efx);
	rc = efx_init_struct(efx, type, pci_dev, net_dev);
	if (rc)
		goto fail1;

	EFX_INFO(efx, "Solarflare Communications NIC detected\n");

	/* Set up basic I/O (BAR mappings etc) */
	rc = efx_init_io(efx);
	if (rc)
		goto fail2;

	/* No serialisation is required with the reset path because
	 * we're in STATE_INIT. */
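	/* Bring the NIC up with a few retries: a recoverable reset
	 * scheduled during probe tears the NIC down and the probe body
	 * is re-run, up to five attempts. */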
	for (i = 0; i < 5; i++) {
		rc = efx_pci_probe_main(efx);

		/* Serialise against efx_reset(). No more resets will be
		 * scheduled since efx_stop_all() has been called, and we
		 * have not and never have been registered with either
		 * the rtnetlink or driverlink layers. */
		cancel_work_sync(&efx->reset_work);

		if (rc == 0) {
			if (efx->reset_pending != RESET_TYPE_NONE) {
				/* If there was a scheduled reset during
				 * probe, the NIC is probably hosed anyway */
				efx_pci_remove_main(efx);
				rc = -EIO;
			} else {
				break;
			}
		}

		/* Retry if a recoverable reset event has been scheduled */
		if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
		    (efx->reset_pending != RESET_TYPE_ALL))
			goto fail3;

		efx->reset_pending = RESET_TYPE_NONE;
	}

	if (rc) {
		EFX_ERR(efx, "Could not reset NIC\n");
		goto fail4;
	}

	/* Switch to the running state before we expose the device to
	 * the OS. This is to ensure that the initial gathering of
	 * MAC stats succeeds. */
	efx->state = STATE_RUNNING;

	efx_mtd_probe(efx); /* allowed to fail */

	rc = efx_register_netdev(efx);
	if (rc)
		goto fail5;

	EFX_LOG(efx, "initialisation successful\n");
	return 0;

 fail5:
	efx_pci_remove_main(efx);
 fail4:
 fail3:
	efx_fini_io(efx);
 fail2:
	efx_fini_struct(efx);
 fail1:
	EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
	free_netdev(net_dev);
	return rc;
}

static struct pci_driver efx_pci_driver = {
	.name		= EFX_DRIVER_NAME,
	.id_table	= efx_pci_table,
	.probe		= efx_pci_probe,
	.remove		= efx_pci_remove,
};

/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/

module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");

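/* Module initialisation registers the netdev notifier and creates the
 * shared refill and reset workqueues before registering the PCI
 * driver, so everything a probe may need already exists; tear-down
 * runs in the reverse order. */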
static int __init efx_init_module(void)
{
	int rc;

	printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");

	rc = register_netdevice_notifier(&efx_netdev_notifier);
	if (rc)
		goto err_notifier;

	refill_workqueue = create_workqueue("sfc_refill");
	if (!refill_workqueue) {
		rc = -ENOMEM;
		goto err_refill;
	}
	reset_workqueue = create_singlethread_workqueue("sfc_reset");
	if (!reset_workqueue) {
		rc = -ENOMEM;
		goto err_reset;
	}

	rc = pci_register_driver(&efx_pci_driver);
	if (rc < 0)
		goto err_pci;

	return 0;

 err_pci:
	destroy_workqueue(reset_workqueue);
 err_reset:
	destroy_workqueue(refill_workqueue);
 err_refill:
	unregister_netdevice_notifier(&efx_netdev_notifier);
 err_notifier:
	return rc;
}

static void __exit efx_exit_module(void)
{
	printk(KERN_INFO "Solarflare NET driver unloading\n");

	pci_unregister_driver(&efx_pci_driver);
	destroy_workqueue(reset_workqueue);
	destroy_workqueue(refill_workqueue);
	unregister_netdevice_notifier(&efx_netdev_notifier);
}

module_init(efx_init_module);
module_exit(efx_exit_module);

MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and "
	      "Solarflare Communications");
MODULE_DESCRIPTION("Solarflare Communications network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);