1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/module.h>
12#include <linux/pci.h>
13#include <linux/netdevice.h>
14#include <linux/etherdevice.h>
15#include <linux/delay.h>
16#include <linux/notifier.h>
17#include <linux/ip.h>
18#include <linux/tcp.h>
19#include <linux/in.h>
20#include <linux/crc32.h>
21#include <linux/ethtool.h>
22#include "net_driver.h"
23#include "gmii.h"
24#include "ethtool.h"
25#include "tx.h"
26#include "rx.h"
27#include "efx.h"
28#include "mdio_10g.h"
29#include "falcon.h"
30#include "workarounds.h"
31#include "mac.h"
32
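/* 9 * 1024 = 9216 bytes, i.e. room for a 9000-byte jumbo frame plus headers */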
33#define EFX_MAX_MTU (9 * 1024)
34
35/* RX slow fill workqueue. If memory allocation fails in the fast path,
36 * a work item is pushed onto this work queue to retry the allocation later,
37 * to avoid the NIC being starved of RX buffers. Since this is a per-CPU
38 * workqueue, there is nothing to be gained in making it per NIC.
39 */
40static struct workqueue_struct *refill_workqueue;
41
42/**************************************************************************
43 *
44 * Configurable values
45 *
46 *************************************************************************/
47
48/*
49 * Enable large receive offload (LRO) aka soft segment reassembly (SSR)
50 *
51 * This sets the default for new devices. It can be controlled later
52 * using ethtool.
53 */
54static int lro = 1;
55module_param(lro, int, 0644);
56MODULE_PARM_DESC(lro, "Large receive offload acceleration");
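/* Example (module name assumed to be "sfc"): loading with "modprobe sfc lro=0"
 * disables LRO by default for new devices; ethtool can still change it per
 * device afterwards. */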
57
58/*
59 * Use separate channels for TX and RX events
60 *
61 * Set this to 1 to use separate channels for TX and RX. It allows us to
62 * apply a higher level of interrupt moderation to TX events.
63 *
64 * This is forced to 0 for MSI interrupt mode as the interrupt vector
65 * is not written.
66 */
67static unsigned int separate_tx_and_rx_channels = 1;
68
69/* This is the weight assigned to each of the (per-channel) virtual
70 * NAPI devices.
71 */
72static int napi_weight = 64;
73
74/* This is the time (in jiffies) between invocations of the hardware
75 * monitor, which checks for known hardware bugs and resets the
76 * hardware and driver as necessary.
77 */
78unsigned int efx_monitor_interval = 1 * HZ;
79
80/* This controls whether or not the hardware monitor will trigger a
81 * reset when it detects an error condition.
82 */
83static unsigned int monitor_reset = 1;
84
85/* This controls whether or not the driver will initialise devices
86 * with invalid MAC addresses stored in the EEPROM or flash. If true,
87 * such devices will be initialised with a random locally-generated
88 * MAC address. This allows for loading the sfc_mtd driver to
89 * reprogram the flash, even if the flash contents (including the MAC
90 * address) have previously been erased.
91 */
92static unsigned int allow_bad_hwaddr;
93
94/* Initial interrupt moderation settings. They can be modified after
95 * module load with ethtool.
96 *
97 * The default for RX should strike a balance between increasing the
98 * round-trip latency and reducing overhead.
99 */
100static unsigned int rx_irq_mod_usec = 60;
101
102/* Initial interrupt moderation settings. They can be modified after
103 * module load with ethtool.
104 *
105 * This default is chosen to ensure that a 10G link does not go idle
106 * while a TX queue is stopped after it has become full. A queue is
107 * restarted when it drops below half full. The time this takes (assuming
108 * worst case 3 descriptors per packet and 1024 descriptors) is
109 * 512 / 3 * 1.2 = 205 usec.
110 */
111static unsigned int tx_irq_mod_usec = 150;
112
113/* This is the first interrupt mode to try out of:
114 * 0 => MSI-X
115 * 1 => MSI
116 * 2 => legacy
117 */
118static unsigned int interrupt_mode;
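/* The default (0) selects MSI-X as the first mode to try (subject to the
 * NIC type's max_interrupt_mode); efx_probe_interrupts() falls back to MSI
 * and then to legacy interrupts if that cannot be enabled. */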
119
120/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
121 * i.e. the number of CPUs among which we may distribute simultaneous
122 * interrupt handling.
123 *
124 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
125 * The default (0) means to assign an interrupt to each package (level II cache)
126 */
127static unsigned int rss_cpus;
128module_param(rss_cpus, uint, 0444);
129MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
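/* Example (module name assumed to be "sfc"): "modprobe sfc rss_cpus=4"
 * limits RSS to at most four interrupt targets; 0 keeps the default
 * behaviour described above. */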
130
131/**************************************************************************
132 *
133 * Utility functions and prototypes
134 *
135 *************************************************************************/
136static void efx_remove_channel(struct efx_channel *channel);
137static void efx_remove_port(struct efx_nic *efx);
138static void efx_fini_napi(struct efx_nic *efx);
139static void efx_fini_channels(struct efx_nic *efx);
140
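/* Code that must be serialised against reset asserts that the RTNL lock is
 * held whenever the NIC is live (STATE_RUNNING or STATE_RESETTING). */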
141#define EFX_ASSERT_RESET_SERIALISED(efx) \
142 do { \
143 if ((efx->state == STATE_RUNNING) || \
144 (efx->state == STATE_RESETTING)) \
145 ASSERT_RTNL(); \
146 } while (0)
147
148/**************************************************************************
149 *
150 * Event queue processing
151 *
152 *************************************************************************/
153
154/* Process channel's event queue
155 *
156 * This function is responsible for processing the event queue of a
157 * single channel. The caller must guarantee that this function will
158 * never be concurrently called more than once on the same channel,
159 * though different channels may be being processed concurrently.
160 */
161static inline int efx_process_channel(struct efx_channel *channel, int rx_quota)
162{
163 int rxdmaqs;
164 struct efx_rx_queue *rx_queue;
165
166 if (unlikely(channel->efx->reset_pending != RESET_TYPE_NONE ||
167 !channel->enabled))
168 return rx_quota;
169
170 rxdmaqs = falcon_process_eventq(channel, &rx_quota);
171
172 /* Deliver last RX packet. */
173 if (channel->rx_pkt) {
174 __efx_rx_packet(channel, channel->rx_pkt,
175 channel->rx_pkt_csummed);
176 channel->rx_pkt = NULL;
177 }
178
179 efx_flush_lro(channel);
180 efx_rx_strategy(channel);
181
182 /* Refill descriptor rings as necessary */
183 rx_queue = &channel->efx->rx_queue[0];
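	/* rxdmaqs is a bitmask from falcon_process_eventq(): bit n set means
	 * efx->rx_queue[n] needs more descriptors pushed */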
184 while (rxdmaqs) {
185 if (rxdmaqs & 0x01)
186 efx_fast_push_rx_descriptors(rx_queue);
187 rx_queue++;
188 rxdmaqs >>= 1;
189 }
190
191 return rx_quota;
192}
193
194/* Mark channel as finished processing
195 *
196 * Note that since we will not receive further interrupts for this
197 * channel before we finish processing and call the eventq_read_ack()
198 * method, there is no need to use the interrupt hold-off timers.
199 */
200static inline void efx_channel_processed(struct efx_channel *channel)
201{
202 /* Write to EVQ_RPTR_REG. If a new event arrived in a race
203 * with finishing processing, a new interrupt will be raised.
204 */
205 channel->work_pending = 0;
206 smp_wmb(); /* Ensure channel updated before any new interrupt. */
207 falcon_eventq_read_ack(channel);
208}
209
210/* NAPI poll handler
211 *
212 * NAPI guarantees serialisation of polls of the same device, which
213 * provides the guarantee required by efx_process_channel().
214 */
215static int efx_poll(struct napi_struct *napi, int budget)
216{
217 struct efx_channel *channel =
218 container_of(napi, struct efx_channel, napi_str);
219 struct net_device *napi_dev = channel->napi_dev;
220 int unused;
221 int rx_packets;
222
223 EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
224 channel->channel, raw_smp_processor_id());
225
226 unused = efx_process_channel(channel, budget);
227 rx_packets = (budget - unused);
228
229 if (rx_packets < budget) {
230 /* There is no race here; although napi_disable() will
231 * only wait for netif_rx_complete(), this isn't a problem
232 * since efx_channel_processed() will have no effect if
233 * interrupts have already been disabled.
234 */
235 netif_rx_complete(napi_dev, napi);
236 efx_channel_processed(channel);
237 }
238
239 return rx_packets;
240}
241
242/* Process the eventq of the specified channel immediately on this CPU
243 *
244 * Disable hardware generated interrupts, wait for any existing
245 * processing to finish, then directly poll (and ack) the eventq.
246 * Finally reenable NAPI and interrupts.
247 *
248 * Since we are touching interrupts the caller should hold the suspend lock
249 */
250void efx_process_channel_now(struct efx_channel *channel)
251{
252 struct efx_nic *efx = channel->efx;
253
254 BUG_ON(!channel->used_flags);
255 BUG_ON(!channel->enabled);
256
257 /* Disable interrupts and wait for ISRs to complete */
258 falcon_disable_interrupts(efx);
259 if (efx->legacy_irq)
260 synchronize_irq(efx->legacy_irq);
261 if (channel->has_interrupt && channel->irq)
262 synchronize_irq(channel->irq);
263
264 /* Wait for any NAPI processing to complete */
265 napi_disable(&channel->napi_str);
266
267 /* Poll the channel */
268	efx_process_channel(channel, efx->type->evq_size);
269
270 /* Ack the eventq. This may cause an interrupt to be generated
271 * when they are reenabled */
272 efx_channel_processed(channel);
273
274 napi_enable(&channel->napi_str);
275 falcon_enable_interrupts(efx);
276}
277
278/* Create event queue
279 * Event queue memory allocations are done only once. If the channel
280 * is reset, the memory buffer will be reused; this guards against
281 * errors during channel reset and also simplifies interrupt handling.
282 */
283static int efx_probe_eventq(struct efx_channel *channel)
284{
285 EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);
286
287 return falcon_probe_eventq(channel);
288}
289
290/* Prepare channel's event queue */
291static int efx_init_eventq(struct efx_channel *channel)
292{
293 EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);
294
295 channel->eventq_read_ptr = 0;
296
297 return falcon_init_eventq(channel);
298}
299
300static void efx_fini_eventq(struct efx_channel *channel)
301{
302 EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);
303
304 falcon_fini_eventq(channel);
305}
306
307static void efx_remove_eventq(struct efx_channel *channel)
308{
309 EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);
310
311 falcon_remove_eventq(channel);
312}
313
314/**************************************************************************
315 *
316 * Channel handling
317 *
318 *************************************************************************/
319
320static int efx_probe_channel(struct efx_channel *channel)
321{
322 struct efx_tx_queue *tx_queue;
323 struct efx_rx_queue *rx_queue;
324 int rc;
325
326 EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);
327
328 rc = efx_probe_eventq(channel);
329 if (rc)
330 goto fail1;
331
332 efx_for_each_channel_tx_queue(tx_queue, channel) {
333 rc = efx_probe_tx_queue(tx_queue);
334 if (rc)
335 goto fail2;
336 }
337
338 efx_for_each_channel_rx_queue(rx_queue, channel) {
339 rc = efx_probe_rx_queue(rx_queue);
340 if (rc)
341 goto fail3;
342 }
343
344 channel->n_rx_frm_trunc = 0;
345
346 return 0;
347
348 fail3:
349 efx_for_each_channel_rx_queue(rx_queue, channel)
350 efx_remove_rx_queue(rx_queue);
351 fail2:
352 efx_for_each_channel_tx_queue(tx_queue, channel)
353 efx_remove_tx_queue(tx_queue);
354 fail1:
355 return rc;
356}
357
358
359/* Channels are shut down and reinitialised whilst the NIC is running
360 * to propagate configuration changes (mtu, checksum offload), or
361 * to clear hardware error conditions.
362 */
363static int efx_init_channels(struct efx_nic *efx)
364{
365 struct efx_tx_queue *tx_queue;
366 struct efx_rx_queue *rx_queue;
367 struct efx_channel *channel;
368 int rc = 0;
369
370	/* Calculate the rx buffer allocation parameters required to
371 * support the current MTU, including padding for header
372 * alignment and overruns.
373 */
374 efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
375 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
376 efx->type->rx_buffer_padding);
377 efx->rx_buffer_order = get_order(efx->rx_buffer_len);
378
379 /* Initialise the channels */
380 efx_for_each_channel(channel, efx) {
381 EFX_LOG(channel->efx, "init chan %d\n", channel->channel);
382
383 rc = efx_init_eventq(channel);
384 if (rc)
385 goto err;
386
387 efx_for_each_channel_tx_queue(tx_queue, channel) {
388 rc = efx_init_tx_queue(tx_queue);
389 if (rc)
390 goto err;
391 }
392
393 /* The rx buffer allocation strategy is MTU dependent */
394 efx_rx_strategy(channel);
395
396 efx_for_each_channel_rx_queue(rx_queue, channel) {
397 rc = efx_init_rx_queue(rx_queue);
398 if (rc)
399 goto err;
400 }
401
402 WARN_ON(channel->rx_pkt != NULL);
403 efx_rx_strategy(channel);
404 }
405
406 return 0;
407
408 err:
409 EFX_ERR(efx, "failed to initialise channel %d\n",
410 channel ? channel->channel : -1);
411 efx_fini_channels(efx);
412 return rc;
413}
414
415/* This enables event queue processing and packet transmission.
416 *
417 * Note that this function is not allowed to fail, since that would
418 * introduce too much complexity into the suspend/resume path.
419 */
420static void efx_start_channel(struct efx_channel *channel)
421{
422 struct efx_rx_queue *rx_queue;
423
424 EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);
425
426 if (!(channel->efx->net_dev->flags & IFF_UP))
427 netif_napi_add(channel->napi_dev, &channel->napi_str,
428 efx_poll, napi_weight);
429
430 channel->work_pending = 0;
431 channel->enabled = 1;
432 smp_wmb(); /* ensure channel updated before first interrupt */
433
434 napi_enable(&channel->napi_str);
435
436 /* Load up RX descriptors */
437 efx_for_each_channel_rx_queue(rx_queue, channel)
438 efx_fast_push_rx_descriptors(rx_queue);
439}
440
441/* This disables event queue processing and packet transmission.
442 * This function does not guarantee that all queue processing
443 * (e.g. RX refill) is complete.
444 */
445static void efx_stop_channel(struct efx_channel *channel)
446{
447 struct efx_rx_queue *rx_queue;
448
449 if (!channel->enabled)
450 return;
451
452 EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);
453
454 channel->enabled = 0;
455 napi_disable(&channel->napi_str);
456
457 /* Ensure that any worker threads have exited or will be no-ops */
458 efx_for_each_channel_rx_queue(rx_queue, channel) {
459 spin_lock_bh(&rx_queue->add_lock);
460 spin_unlock_bh(&rx_queue->add_lock);
461 }
462}
463
464static void efx_fini_channels(struct efx_nic *efx)
465{
466 struct efx_channel *channel;
467 struct efx_tx_queue *tx_queue;
468 struct efx_rx_queue *rx_queue;
469
470 EFX_ASSERT_RESET_SERIALISED(efx);
471 BUG_ON(efx->port_enabled);
472
473 efx_for_each_channel(channel, efx) {
474 EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);
475
476 efx_for_each_channel_rx_queue(rx_queue, channel)
477 efx_fini_rx_queue(rx_queue);
478 efx_for_each_channel_tx_queue(tx_queue, channel)
479 efx_fini_tx_queue(tx_queue);
480 }
481
482 /* Do the event queues last so that we can handle flush events
483 * for all DMA queues. */
484 efx_for_each_channel(channel, efx) {
485 EFX_LOG(channel->efx, "shut down evq %d\n", channel->channel);
486
487 efx_fini_eventq(channel);
488 }
489}
490
491static void efx_remove_channel(struct efx_channel *channel)
492{
493 struct efx_tx_queue *tx_queue;
494 struct efx_rx_queue *rx_queue;
495
496 EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);
497
498 efx_for_each_channel_rx_queue(rx_queue, channel)
499 efx_remove_rx_queue(rx_queue);
500 efx_for_each_channel_tx_queue(tx_queue, channel)
501 efx_remove_tx_queue(tx_queue);
502 efx_remove_eventq(channel);
503
504 channel->used_flags = 0;
505}
506
507void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
508{
509 queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
510}
511
512/**************************************************************************
513 *
514 * Port handling
515 *
516 **************************************************************************/
517
518/* This ensures that the kernel is kept informed (via
519 * netif_carrier_on/off) of the link status, and also maintains the
520 * link status's stop on the port's TX queue.
521 */
522static void efx_link_status_changed(struct efx_nic *efx)
523{
524 int carrier_ok;
525
526 /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
527 * that no events are triggered between unregister_netdev() and the
528 * driver unloading. A more general condition is that NETDEV_CHANGE
529 * can only be generated between NETDEV_UP and NETDEV_DOWN */
530 if (!netif_running(efx->net_dev))
531 return;
532
533 carrier_ok = netif_carrier_ok(efx->net_dev) ? 1 : 0;
534 if (efx->link_up != carrier_ok) {
535 efx->n_link_state_changes++;
536
537 if (efx->link_up)
538 netif_carrier_on(efx->net_dev);
539 else
540 netif_carrier_off(efx->net_dev);
541 }
542
543 /* Status message for kernel log */
544 if (efx->link_up) {
545 struct mii_if_info *gmii = &efx->mii;
546 unsigned adv, lpa;
547 /* NONE here means direct XAUI from the controller, with no
548 * MDIO-attached device we can query. */
549 if (efx->phy_type != PHY_TYPE_NONE) {
550 adv = gmii_advertised(gmii);
551 lpa = gmii_lpa(gmii);
552 } else {
553 lpa = GM_LPA_10000 | LPA_DUPLEX;
554 adv = lpa;
555 }
556 EFX_INFO(efx, "link up at %dMbps %s-duplex "
557 "(adv %04x lpa %04x) (MTU %d)%s\n",
558 (efx->link_options & GM_LPA_10000 ? 10000 :
559 (efx->link_options & GM_LPA_1000 ? 1000 :
560 (efx->link_options & GM_LPA_100 ? 100 :
561 10))),
562 (efx->link_options & GM_LPA_DUPLEX ?
563 "full" : "half"),
564 adv, lpa,
565 efx->net_dev->mtu,
566 (efx->promiscuous ? " [PROMISC]" : ""));
567 } else {
568 EFX_INFO(efx, "link down\n");
569 }
570
571}
572
573/* This call reinitialises the MAC to pick up new PHY settings. The
574 * caller must hold the mac_lock */
575static void __efx_reconfigure_port(struct efx_nic *efx)
576{
577 WARN_ON(!mutex_is_locked(&efx->mac_lock));
578
579 EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
580 raw_smp_processor_id());
581
582 falcon_reconfigure_xmac(efx);
583
584 /* Inform kernel of loss/gain of carrier */
585 efx_link_status_changed(efx);
586}
587
588/* Reinitialise the MAC to pick up new PHY settings, even if the port is
589 * disabled. */
590void efx_reconfigure_port(struct efx_nic *efx)
591{
592 EFX_ASSERT_RESET_SERIALISED(efx);
593
594 mutex_lock(&efx->mac_lock);
595 __efx_reconfigure_port(efx);
596 mutex_unlock(&efx->mac_lock);
597}
598
599/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
600 * we skip efx_reconfigure_port() if the port is disabled. Care is taken
601 * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
602static void efx_reconfigure_work(struct work_struct *data)
603{
604 struct efx_nic *efx = container_of(data, struct efx_nic,
605 reconfigure_work);
606
607 mutex_lock(&efx->mac_lock);
608 if (efx->port_enabled)
609 __efx_reconfigure_port(efx);
610 mutex_unlock(&efx->mac_lock);
611}
612
613static int efx_probe_port(struct efx_nic *efx)
614{
615 int rc;
616
617 EFX_LOG(efx, "create port\n");
618
619 /* Connect up MAC/PHY operations table and read MAC address */
620 rc = falcon_probe_port(efx);
621 if (rc)
622 goto err;
623
624 /* Sanity check MAC address */
625 if (is_valid_ether_addr(efx->mac_address)) {
626 memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
627 } else {
628 DECLARE_MAC_BUF(mac);
629
630 EFX_ERR(efx, "invalid MAC address %s\n",
631 print_mac(mac, efx->mac_address));
632 if (!allow_bad_hwaddr) {
633 rc = -EINVAL;
634 goto err;
635 }
636 random_ether_addr(efx->net_dev->dev_addr);
637 EFX_INFO(efx, "using locally-generated MAC %s\n",
638 print_mac(mac, efx->net_dev->dev_addr));
639 }
640
641 return 0;
642
643 err:
644 efx_remove_port(efx);
645 return rc;
646}
647
648static int efx_init_port(struct efx_nic *efx)
649{
650 int rc;
651
652 EFX_LOG(efx, "init port\n");
653
654 /* Initialise the MAC and PHY */
655 rc = falcon_init_xmac(efx);
656 if (rc)
657 return rc;
658
659 efx->port_initialized = 1;
660
661 /* Reconfigure port to program MAC registers */
662 falcon_reconfigure_xmac(efx);
663
664 return 0;
665}
666
667/* Allow efx_reconfigure_port() to be scheduled, and close the window
668 * between efx_stop_port and efx_flush_all whereby a previously scheduled
669 * efx_reconfigure_port() may have been cancelled */
670static void efx_start_port(struct efx_nic *efx)
671{
672 EFX_LOG(efx, "start port\n");
673 BUG_ON(efx->port_enabled);
674
675 mutex_lock(&efx->mac_lock);
676 efx->port_enabled = 1;
677 __efx_reconfigure_port(efx);
678 mutex_unlock(&efx->mac_lock);
679}
680
681/* Prevent efx_reconfigure_work and efx_monitor() from executing, and
682 * efx_set_multicast_list() from scheduling efx_reconfigure_work.
683 * efx_reconfigure_work can still be scheduled via NAPI processing
684 * until efx_flush_all() is called */
685static void efx_stop_port(struct efx_nic *efx)
686{
687 EFX_LOG(efx, "stop port\n");
688
689 mutex_lock(&efx->mac_lock);
690 efx->port_enabled = 0;
691 mutex_unlock(&efx->mac_lock);
692
693 /* Serialise against efx_set_multicast_list() */
694	if (efx_dev_registered(efx)) {
695		netif_tx_lock_bh(efx->net_dev);
696 netif_tx_unlock_bh(efx->net_dev);
697 }
698}
699
700static void efx_fini_port(struct efx_nic *efx)
701{
702 EFX_LOG(efx, "shut down port\n");
703
704 if (!efx->port_initialized)
705 return;
706
707 falcon_fini_xmac(efx);
708 efx->port_initialized = 0;
709
710 efx->link_up = 0;
711 efx_link_status_changed(efx);
712}
713
714static void efx_remove_port(struct efx_nic *efx)
715{
716 EFX_LOG(efx, "destroying port\n");
717
718 falcon_remove_port(efx);
719}
720
721/**************************************************************************
722 *
723 * NIC handling
724 *
725 **************************************************************************/
726
727/* This configures the PCI device to enable I/O and DMA. */
728static int efx_init_io(struct efx_nic *efx)
729{
730 struct pci_dev *pci_dev = efx->pci_dev;
731 dma_addr_t dma_mask = efx->type->max_dma_mask;
732 int rc;
733
734 EFX_LOG(efx, "initialising I/O\n");
735
736 rc = pci_enable_device(pci_dev);
737 if (rc) {
738 EFX_ERR(efx, "failed to enable PCI device\n");
739 goto fail1;
740 }
741
742 pci_set_master(pci_dev);
743
744 /* Set the PCI DMA mask. Try all possibilities from our
745 * genuine mask down to 32 bits, because some architectures
746 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
747 * masks event though they reject 46 bit masks.
748 */
749 while (dma_mask > 0x7fffffffUL) {
750 if (pci_dma_supported(pci_dev, dma_mask) &&
751 ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
752 break;
753 dma_mask >>= 1;
754 }
755 if (rc) {
756 EFX_ERR(efx, "could not find a suitable DMA mask\n");
757 goto fail2;
758 }
759 EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);
760 rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
761 if (rc) {
762 /* pci_set_consistent_dma_mask() is not *allowed* to
763 * fail with a mask that pci_set_dma_mask() accepted,
764 * but just in case...
765 */
766 EFX_ERR(efx, "failed to set consistent DMA mask\n");
767 goto fail2;
768 }
769
770 efx->membase_phys = pci_resource_start(efx->pci_dev,
771 efx->type->mem_bar);
772 rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
773 if (rc) {
774 EFX_ERR(efx, "request for memory BAR failed\n");
775 rc = -EIO;
776 goto fail3;
777 }
778 efx->membase = ioremap_nocache(efx->membase_phys,
779 efx->type->mem_map_size);
780 if (!efx->membase) {
781		EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n",
782 efx->type->mem_bar,
783 (unsigned long long)efx->membase_phys,
784			efx->type->mem_map_size);
785 rc = -ENOMEM;
786 goto fail4;
787 }
788	EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n",
789 efx->type->mem_bar, (unsigned long long)efx->membase_phys,
790 efx->type->mem_map_size, efx->membase);
791
792 return 0;
793
794 fail4:
795 release_mem_region(efx->membase_phys, efx->type->mem_map_size);
796 fail3:
797	efx->membase_phys = 0;
798 fail2:
799 pci_disable_device(efx->pci_dev);
800 fail1:
801 return rc;
802}
803
804static void efx_fini_io(struct efx_nic *efx)
805{
806 EFX_LOG(efx, "shutting down I/O\n");
807
808 if (efx->membase) {
809 iounmap(efx->membase);
810 efx->membase = NULL;
811 }
812
813 if (efx->membase_phys) {
814 pci_release_region(efx->pci_dev, efx->type->mem_bar);
815		efx->membase_phys = 0;
816	}
817
818 pci_disable_device(efx->pci_dev);
819}
820
821/* Probe the number and type of interrupts we are able to obtain. */
822static void efx_probe_interrupts(struct efx_nic *efx)
823{
824 int max_channel = efx->type->phys_addr_channels - 1;
825 struct msix_entry xentries[EFX_MAX_CHANNELS];
826 int rc, i;
827
828 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
829 BUG_ON(!pci_find_capability(efx->pci_dev, PCI_CAP_ID_MSIX));
830
831 efx->rss_queues = rss_cpus ? rss_cpus : num_online_cpus();
832 efx->rss_queues = min(efx->rss_queues, max_channel + 1);
833 efx->rss_queues = min(efx->rss_queues, EFX_MAX_CHANNELS);
834
835 /* Request maximum number of MSI interrupts, and fill out
836		 * the channel interrupt information from the allowed allocation */
837 for (i = 0; i < efx->rss_queues; i++)
838 xentries[i].entry = i;
839 rc = pci_enable_msix(efx->pci_dev, xentries, efx->rss_queues);
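		/* A positive return value from pci_enable_msix() is the number
		 * of vectors actually available, so retry with that reduced
		 * count */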
840 if (rc > 0) {
841 EFX_BUG_ON_PARANOID(rc >= efx->rss_queues);
842 efx->rss_queues = rc;
843 rc = pci_enable_msix(efx->pci_dev, xentries,
844 efx->rss_queues);
845 }
846
847 if (rc == 0) {
848 for (i = 0; i < efx->rss_queues; i++) {
849 efx->channel[i].has_interrupt = 1;
850 efx->channel[i].irq = xentries[i].vector;
851 }
852 } else {
853 /* Fall back to single channel MSI */
854 efx->interrupt_mode = EFX_INT_MODE_MSI;
855 EFX_ERR(efx, "could not enable MSI-X\n");
856 }
857 }
858
859 /* Try single interrupt MSI */
860 if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
861 efx->rss_queues = 1;
862 rc = pci_enable_msi(efx->pci_dev);
863 if (rc == 0) {
864 efx->channel[0].irq = efx->pci_dev->irq;
865 efx->channel[0].has_interrupt = 1;
866 } else {
867 EFX_ERR(efx, "could not enable MSI\n");
868 efx->interrupt_mode = EFX_INT_MODE_LEGACY;
869 }
870 }
871
872 /* Assume legacy interrupts */
873 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
874 efx->rss_queues = 1;
875 /* Every channel is interruptible */
876 for (i = 0; i < EFX_MAX_CHANNELS; i++)
877 efx->channel[i].has_interrupt = 1;
878 efx->legacy_irq = efx->pci_dev->irq;
879 }
880}
881
882static void efx_remove_interrupts(struct efx_nic *efx)
883{
884 struct efx_channel *channel;
885
886 /* Remove MSI/MSI-X interrupts */
887 efx_for_each_channel_with_interrupt(channel, efx)
888 channel->irq = 0;
889 pci_disable_msi(efx->pci_dev);
890 pci_disable_msix(efx->pci_dev);
891
892 /* Remove legacy interrupt */
893 efx->legacy_irq = 0;
894}
895
896/* Select number of used resources
897 * Should be called after probe_interrupts()
898 */
899static void efx_select_used(struct efx_nic *efx)
900{
901 struct efx_tx_queue *tx_queue;
902 struct efx_rx_queue *rx_queue;
903 int i;
904
905 /* TX queues. One per port per channel with TX capability
906 * (more than one per port won't work on Linux, due to out
907 * of order issues... but will be fine on Solaris)
908 */
909 tx_queue = &efx->tx_queue[0];
910
911 /* Perform this for each channel with TX capabilities.
912 * At the moment, we only support a single TX queue
913 */
914 tx_queue->used = 1;
915 if ((!EFX_INT_MODE_USE_MSI(efx)) && separate_tx_and_rx_channels)
916 tx_queue->channel = &efx->channel[1];
917 else
918 tx_queue->channel = &efx->channel[0];
919 tx_queue->channel->used_flags |= EFX_USED_BY_TX;
920 tx_queue++;
921
922 /* RX queues. Each has a dedicated channel. */
923 for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
924 rx_queue = &efx->rx_queue[i];
925
926 if (i < efx->rss_queues) {
927 rx_queue->used = 1;
928 /* If we allow multiple RX queues per channel
929 * we need to decide that here
930 */
931 rx_queue->channel = &efx->channel[rx_queue->queue];
932 rx_queue->channel->used_flags |= EFX_USED_BY_RX;
933 rx_queue++;
934 }
935 }
936}
937
938static int efx_probe_nic(struct efx_nic *efx)
939{
940 int rc;
941
942 EFX_LOG(efx, "creating NIC\n");
943
944 /* Carry out hardware-type specific initialisation */
945 rc = falcon_probe_nic(efx);
946 if (rc)
947 return rc;
948
949 /* Determine the number of channels and RX queues by trying to hook
950 * in MSI-X interrupts. */
951 efx_probe_interrupts(efx);
952
953 /* Determine number of RX queues and TX queues */
954 efx_select_used(efx);
955
956 /* Initialise the interrupt moderation settings */
957 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);
958
959 return 0;
960}
961
962static void efx_remove_nic(struct efx_nic *efx)
963{
964 EFX_LOG(efx, "destroying NIC\n");
965
966 efx_remove_interrupts(efx);
967 falcon_remove_nic(efx);
968}
969
970/**************************************************************************
971 *
972 * NIC startup/shutdown
973 *
974 *************************************************************************/
975
976static int efx_probe_all(struct efx_nic *efx)
977{
978 struct efx_channel *channel;
979 int rc;
980
981 /* Create NIC */
982 rc = efx_probe_nic(efx);
983 if (rc) {
984 EFX_ERR(efx, "failed to create NIC\n");
985 goto fail1;
986 }
987
988 /* Create port */
989 rc = efx_probe_port(efx);
990 if (rc) {
991 EFX_ERR(efx, "failed to create port\n");
992 goto fail2;
993 }
994
995 /* Create channels */
996 efx_for_each_channel(channel, efx) {
997 rc = efx_probe_channel(channel);
998 if (rc) {
999 EFX_ERR(efx, "failed to create channel %d\n",
1000 channel->channel);
1001 goto fail3;
1002 }
1003 }
1004
1005 return 0;
1006
1007 fail3:
1008 efx_for_each_channel(channel, efx)
1009 efx_remove_channel(channel);
1010 efx_remove_port(efx);
1011 fail2:
1012 efx_remove_nic(efx);
1013 fail1:
1014 return rc;
1015}
1016
1017/* Called after previous invocation(s) of efx_stop_all, restarts the
1018 * port, kernel transmit queue, NAPI processing and hardware interrupts,
1019 * and ensures that the port is scheduled to be reconfigured.
1020 * This function is safe to call multiple times when the NIC is in any
1021 * state. */
1022static void efx_start_all(struct efx_nic *efx)
1023{
1024 struct efx_channel *channel;
1025
1026 EFX_ASSERT_RESET_SERIALISED(efx);
1027
1028 /* Check that it is appropriate to restart the interface. All
1029 * of these flags are safe to read under just the rtnl lock */
1030 if (efx->port_enabled)
1031 return;
1032 if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
1033 return;
1034	if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
1035		return;
1036
1037 /* Mark the port as enabled so port reconfigurations can start, then
1038 * restart the transmit interface early so the watchdog timer stops */
1039 efx_start_port(efx);
1040 efx_wake_queue(efx);
1041
1042 efx_for_each_channel(channel, efx)
1043 efx_start_channel(channel);
1044
1045 falcon_enable_interrupts(efx);
1046
1047 /* Start hardware monitor if we're in RUNNING */
1048 if (efx->state == STATE_RUNNING)
1049 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1050 efx_monitor_interval);
1051}
1052
1053/* Flush all delayed work. Should only be called when no more delayed work
1054 * will be scheduled. This doesn't flush pending online resets (efx_reset),
1055 * since we're holding the rtnl_lock at this point. */
1056static void efx_flush_all(struct efx_nic *efx)
1057{
1058 struct efx_rx_queue *rx_queue;
1059
1060 /* Make sure the hardware monitor is stopped */
1061 cancel_delayed_work_sync(&efx->monitor_work);
1062
1063 /* Ensure that all RX slow refills are complete. */
1064	efx_for_each_rx_queue(rx_queue, efx)
1065		cancel_delayed_work_sync(&rx_queue->work);
1066
1067 /* Stop scheduled port reconfigurations */
1068 cancel_work_sync(&efx->reconfigure_work);
1069
1070}
1071
1072/* Quiesce hardware and software without bringing the link down.
1073 * Safe to call multiple times, when the NIC and interface are in any
1074 * state. The caller is guaranteed to subsequently be in a position
1075 * to modify any hardware and software state they see fit without
1076 * taking locks. */
1077static void efx_stop_all(struct efx_nic *efx)
1078{
1079 struct efx_channel *channel;
1080
1081 EFX_ASSERT_RESET_SERIALISED(efx);
1082
1083 /* port_enabled can be read safely under the rtnl lock */
1084 if (!efx->port_enabled)
1085 return;
1086
1087 /* Disable interrupts and wait for ISR to complete */
1088 falcon_disable_interrupts(efx);
1089 if (efx->legacy_irq)
1090 synchronize_irq(efx->legacy_irq);
1091	efx_for_each_channel_with_interrupt(channel, efx) {
1092		if (channel->irq)
1093 synchronize_irq(channel->irq);
1094	}
1095
1096 /* Stop all NAPI processing and synchronous rx refills */
1097 efx_for_each_channel(channel, efx)
1098 efx_stop_channel(channel);
1099
1100 /* Stop all asynchronous port reconfigurations. Since all
1101 * event processing has already been stopped, there is no
1102	 * window to lose PHY events */
1103 efx_stop_port(efx);
1104
1105 /* Flush reconfigure_work, refill_workqueue, monitor_work */
1106 efx_flush_all(efx);
1107
1108 /* Isolate the MAC from the TX and RX engines, so that queue
1109 * flushes will complete in a timely fashion. */
1110 falcon_deconfigure_mac_wrapper(efx);
1111 falcon_drain_tx_fifo(efx);
1112
1113 /* Stop the kernel transmit interface late, so the watchdog
1114 * timer isn't ticking over the flush */
1115 efx_stop_queue(efx);
1116	if (efx_dev_registered(efx)) {
1117		netif_tx_lock_bh(efx->net_dev);
1118 netif_tx_unlock_bh(efx->net_dev);
1119 }
1120}
1121
1122static void efx_remove_all(struct efx_nic *efx)
1123{
1124 struct efx_channel *channel;
1125
1126 efx_for_each_channel(channel, efx)
1127 efx_remove_channel(channel);
1128 efx_remove_port(efx);
1129 efx_remove_nic(efx);
1130}
1131
1132/* A convenience function to safely flush all the queues */
1133int efx_flush_queues(struct efx_nic *efx)
1134{
1135 int rc;
1136
1137 EFX_ASSERT_RESET_SERIALISED(efx);
1138
1139 efx_stop_all(efx);
1140
1141 efx_fini_channels(efx);
1142 rc = efx_init_channels(efx);
1143 if (rc) {
1144 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1145 return rc;
1146 }
1147
1148 efx_start_all(efx);
1149
1150 return 0;
1151}
1152
1153/**************************************************************************
1154 *
1155 * Interrupt moderation
1156 *
1157 **************************************************************************/
1158
1159/* Set interrupt moderation parameters */
1160void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs)
1161{
1162 struct efx_tx_queue *tx_queue;
1163 struct efx_rx_queue *rx_queue;
1164
1165 EFX_ASSERT_RESET_SERIALISED(efx);
1166
1167 efx_for_each_tx_queue(tx_queue, efx)
1168 tx_queue->channel->irq_moderation = tx_usecs;
1169
1170 efx_for_each_rx_queue(rx_queue, efx)
1171 rx_queue->channel->irq_moderation = rx_usecs;
1172}
1173
1174/**************************************************************************
1175 *
1176 * Hardware monitor
1177 *
1178 **************************************************************************/
1179
1180/* Run periodically off the general workqueue. Serialised against
1181 * efx_reconfigure_port via the mac_lock */
1182static void efx_monitor(struct work_struct *data)
1183{
1184 struct efx_nic *efx = container_of(data, struct efx_nic,
1185 monitor_work.work);
1186 int rc = 0;
1187
1188 EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
1189 raw_smp_processor_id());
1190
1191
1192	/* If the mac_lock is already held then a port reconfiguration is
1193	 * probably already in progress, which will do
1194 * most of the work of check_hw() anyway. */
1195 if (!mutex_trylock(&efx->mac_lock)) {
1196 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1197 efx_monitor_interval);
1198 return;
1199 }
1200
1201 if (efx->port_enabled)
1202 rc = falcon_check_xmac(efx);
1203 mutex_unlock(&efx->mac_lock);
1204
1205 if (rc) {
1206 if (monitor_reset) {
1207 EFX_ERR(efx, "hardware monitor detected a fault: "
1208 "triggering reset\n");
1209 efx_schedule_reset(efx, RESET_TYPE_MONITOR);
1210 } else {
1211 EFX_ERR(efx, "hardware monitor detected a fault, "
1212 "skipping reset\n");
1213 }
1214 }
1215
1216 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1217 efx_monitor_interval);
1218}
1219
1220/**************************************************************************
1221 *
1222 * ioctls
1223 *
1224 *************************************************************************/
1225
1226/* Net device ioctl
1227 * Context: process, rtnl_lock() held.
1228 */
1229static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
1230{
1231 struct efx_nic *efx = net_dev->priv;
1232
1233 EFX_ASSERT_RESET_SERIALISED(efx);
1234
1235 return generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
1236}
1237
1238/**************************************************************************
1239 *
1240 * NAPI interface
1241 *
1242 **************************************************************************/
1243
1244static int efx_init_napi(struct efx_nic *efx)
1245{
1246 struct efx_channel *channel;
1247 int rc;
1248
1249 efx_for_each_channel(channel, efx) {
1250 channel->napi_dev = efx->net_dev;
1251 rc = efx_lro_init(&channel->lro_mgr, efx);
1252 if (rc)
1253 goto err;
1254 }
1255 return 0;
1256 err:
1257 efx_fini_napi(efx);
1258 return rc;
1259}
1260
1261static void efx_fini_napi(struct efx_nic *efx)
1262{
1263 struct efx_channel *channel;
1264
1265 efx_for_each_channel(channel, efx) {
1266 efx_lro_fini(&channel->lro_mgr);
1267 channel->napi_dev = NULL;
1268 }
1269}
1270
1271/**************************************************************************
1272 *
1273 * Kernel netpoll interface
1274 *
1275 *************************************************************************/
1276
1277#ifdef CONFIG_NET_POLL_CONTROLLER
1278
1279/* Although in the common case interrupts will be disabled, this is not
1280 * guaranteed. However, all our work happens inside the NAPI callback,
1281 * so no locking is required.
1282 */
1283static void efx_netpoll(struct net_device *net_dev)
1284{
1285 struct efx_nic *efx = net_dev->priv;
1286 struct efx_channel *channel;
1287
1288 efx_for_each_channel_with_interrupt(channel, efx)
1289 efx_schedule_channel(channel);
1290}
1291
1292#endif
1293
1294/**************************************************************************
1295 *
1296 * Kernel net device interface
1297 *
1298 *************************************************************************/
1299
1300/* Context: process, rtnl_lock() held. */
1301static int efx_net_open(struct net_device *net_dev)
1302{
1303 struct efx_nic *efx = net_dev->priv;
1304 EFX_ASSERT_RESET_SERIALISED(efx);
1305
1306 EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
1307 raw_smp_processor_id());
1308
1309 efx_start_all(efx);
1310 return 0;
1311}
1312
1313/* Context: process, rtnl_lock() held.
1314 * Note that the kernel will ignore our return code; this method
1315 * should really be a void.
1316 */
1317static int efx_net_stop(struct net_device *net_dev)
1318{
1319 struct efx_nic *efx = net_dev->priv;
1320 int rc;
1321
1322 EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
1323 raw_smp_processor_id());
1324
1325 /* Stop the device and flush all the channels */
1326 efx_stop_all(efx);
1327 efx_fini_channels(efx);
1328 rc = efx_init_channels(efx);
1329 if (rc)
1330 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1331
1332 return 0;
1333}
1334
1335/* Context: process, dev_base_lock held, non-blocking. */
1336static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1337{
1338 struct efx_nic *efx = net_dev->priv;
1339 struct efx_mac_stats *mac_stats = &efx->mac_stats;
1340 struct net_device_stats *stats = &net_dev->stats;
1341
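	/* This is called in a non-blocking context (see above), so if the
	 * stats lock is contended return the previous snapshot rather than
	 * wait for it */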
1342 if (!spin_trylock(&efx->stats_lock))
1343 return stats;
1344 if (efx->state == STATE_RUNNING) {
1345 falcon_update_stats_xmac(efx);
1346 falcon_update_nic_stats(efx);
1347 }
1348 spin_unlock(&efx->stats_lock);
1349
1350 stats->rx_packets = mac_stats->rx_packets;
1351 stats->tx_packets = mac_stats->tx_packets;
1352 stats->rx_bytes = mac_stats->rx_bytes;
1353 stats->tx_bytes = mac_stats->tx_bytes;
1354 stats->multicast = mac_stats->rx_multicast;
1355 stats->collisions = mac_stats->tx_collision;
1356 stats->rx_length_errors = (mac_stats->rx_gtjumbo +
1357 mac_stats->rx_length_error);
1358 stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
1359 stats->rx_crc_errors = mac_stats->rx_bad;
1360 stats->rx_frame_errors = mac_stats->rx_align_error;
1361 stats->rx_fifo_errors = mac_stats->rx_overflow;
1362 stats->rx_missed_errors = mac_stats->rx_missed;
1363 stats->tx_window_errors = mac_stats->tx_late_collision;
1364
1365 stats->rx_errors = (stats->rx_length_errors +
1366 stats->rx_over_errors +
1367 stats->rx_crc_errors +
1368 stats->rx_frame_errors +
1369 stats->rx_fifo_errors +
1370 stats->rx_missed_errors +
1371 mac_stats->rx_symbol_error);
1372 stats->tx_errors = (stats->tx_window_errors +
1373 mac_stats->tx_bad);
1374
1375 return stats;
1376}
1377
1378/* Context: netif_tx_lock held, BHs disabled. */
1379static void efx_watchdog(struct net_device *net_dev)
1380{
1381 struct efx_nic *efx = net_dev->priv;
1382
1383 EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d: %s\n",
1384 atomic_read(&efx->netif_stop_count), efx->port_enabled,
1385 monitor_reset ? "resetting channels" : "skipping reset");
1386
1387 if (monitor_reset)
1388 efx_schedule_reset(efx, RESET_TYPE_MONITOR);
1389}
1390
1391
1392/* Context: process, rtnl_lock() held. */
1393static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1394{
1395 struct efx_nic *efx = net_dev->priv;
1396 int rc = 0;
1397
1398 EFX_ASSERT_RESET_SERIALISED(efx);
1399
1400 if (new_mtu > EFX_MAX_MTU)
1401 return -EINVAL;
1402
1403 efx_stop_all(efx);
1404
1405 EFX_LOG(efx, "changing MTU to %d\n", new_mtu);
1406
1407 efx_fini_channels(efx);
1408 net_dev->mtu = new_mtu;
1409 rc = efx_init_channels(efx);
1410 if (rc)
1411 goto fail;
1412
1413 efx_start_all(efx);
1414 return rc;
1415
1416 fail:
1417 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1418 return rc;
1419}
1420
1421static int efx_set_mac_address(struct net_device *net_dev, void *data)
1422{
1423 struct efx_nic *efx = net_dev->priv;
1424 struct sockaddr *addr = data;
1425 char *new_addr = addr->sa_data;
1426
1427 EFX_ASSERT_RESET_SERIALISED(efx);
1428
1429 if (!is_valid_ether_addr(new_addr)) {
1430 DECLARE_MAC_BUF(mac);
1431 EFX_ERR(efx, "invalid ethernet MAC address requested: %s\n",
1432 print_mac(mac, new_addr));
1433 return -EINVAL;
1434 }
1435
1436 memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
1437
1438 /* Reconfigure the MAC */
1439 efx_reconfigure_port(efx);
1440
1441 return 0;
1442}
1443
1444/* Context: netif_tx_lock held, BHs disabled. */
1445static void efx_set_multicast_list(struct net_device *net_dev)
1446{
1447 struct efx_nic *efx = net_dev->priv;
1448 struct dev_mc_list *mc_list = net_dev->mc_list;
1449 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
1450 int promiscuous;
1451 u32 crc;
1452 int bit;
1453 int i;
1454
1455 /* Set per-MAC promiscuity flag and reconfigure MAC if necessary */
1456 promiscuous = (net_dev->flags & IFF_PROMISC) ? 1 : 0;
1457 if (efx->promiscuous != promiscuous) {
1458 efx->promiscuous = promiscuous;
1459 /* Close the window between efx_stop_port() and efx_flush_all()
1460 * by only queuing work when the port is enabled. */
1461 if (efx->port_enabled)
1462 queue_work(efx->workqueue, &efx->reconfigure_work);
1463 }
1464
1465 /* Build multicast hash table */
1466 if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
1467 memset(mc_hash, 0xff, sizeof(*mc_hash));
1468 } else {
1469 memset(mc_hash, 0x00, sizeof(*mc_hash));
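		/* Each address sets one bit in the hash: the bit index is the
		 * little-endian CRC32 of the MAC address masked to the hash
		 * size */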
1470 for (i = 0; i < net_dev->mc_count; i++) {
1471 crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
1472 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
1473 set_bit_le(bit, mc_hash->byte);
1474 mc_list = mc_list->next;
1475 }
1476 }
1477
1478 /* Create and activate new global multicast hash table */
1479 falcon_set_multicast_hash(efx);
1480}
1481
1482static int efx_netdev_event(struct notifier_block *this,
1483 unsigned long event, void *ptr)
1484{
1485 struct net_device *net_dev = (struct net_device *)ptr;
1486
1487 if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
1488 struct efx_nic *efx = net_dev->priv;
1489
1490 strcpy(efx->name, net_dev->name);
1491 }
1492
1493 return NOTIFY_DONE;
1494}
1495
1496static struct notifier_block efx_netdev_notifier = {
1497 .notifier_call = efx_netdev_event,
1498};
1499
1500static int efx_register_netdev(struct efx_nic *efx)
1501{
1502 struct net_device *net_dev = efx->net_dev;
1503 int rc;
1504
1505 net_dev->watchdog_timeo = 5 * HZ;
1506 net_dev->irq = efx->pci_dev->irq;
1507 net_dev->open = efx_net_open;
1508 net_dev->stop = efx_net_stop;
1509 net_dev->get_stats = efx_net_stats;
1510 net_dev->tx_timeout = &efx_watchdog;
1511 net_dev->hard_start_xmit = efx_hard_start_xmit;
1512 net_dev->do_ioctl = efx_ioctl;
1513 net_dev->change_mtu = efx_change_mtu;
1514 net_dev->set_mac_address = efx_set_mac_address;
1515 net_dev->set_multicast_list = efx_set_multicast_list;
1516#ifdef CONFIG_NET_POLL_CONTROLLER
1517 net_dev->poll_controller = efx_netpoll;
1518#endif
1519 SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
1520 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
1521
1522 /* Always start with carrier off; PHY events will detect the link */
1523 netif_carrier_off(efx->net_dev);
1524
1525 /* Clear MAC statistics */
1526 falcon_update_stats_xmac(efx);
1527 memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
1528
1529 rc = register_netdev(net_dev);
1530 if (rc) {
1531 EFX_ERR(efx, "could not register net dev\n");
1532 return rc;
1533 }
1534 strcpy(efx->name, net_dev->name);
1535
1536 return 0;
1537}
1538
1539static void efx_unregister_netdev(struct efx_nic *efx)
1540{
1541 struct efx_tx_queue *tx_queue;
1542
1543 if (!efx->net_dev)
1544 return;
1545
1546 BUG_ON(efx->net_dev->priv != efx);
1547
1548 /* Free up any skbs still remaining. This has to happen before
1549 * we try to unregister the netdev as running their destructors
1550 * may be needed to get the device ref. count to 0. */
1551 efx_for_each_tx_queue(tx_queue, efx)
1552 efx_release_tx_buffers(tx_queue);
1553
1554	if (efx_dev_registered(efx)) {
1555		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
1556 unregister_netdev(efx->net_dev);
1557 }
1558}
1559
1560/**************************************************************************
1561 *
1562 * Device reset and suspend
1563 *
1564 **************************************************************************/
1565
1566/* The final hardware and software finalisation before reset. */
1567static int efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
1568{
1569 int rc;
1570
1571 EFX_ASSERT_RESET_SERIALISED(efx);
1572
1573 rc = falcon_xmac_get_settings(efx, ecmd);
1574 if (rc) {
1575 EFX_ERR(efx, "could not back up PHY settings\n");
1576 goto fail;
1577 }
1578
1579 efx_fini_channels(efx);
1580 return 0;
1581
1582 fail:
1583 return rc;
1584}
1585
1586/* The first part of software initialisation after a hardware reset.
1587 * This function does not handle serialisation with the kernel; it
1588 * assumes the caller has done this */
1589static int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd)
1590{
1591 int rc;
1592
1593 rc = efx_init_channels(efx);
1594 if (rc)
1595 goto fail1;
1596
1597 /* Restore MAC and PHY settings. */
1598 rc = falcon_xmac_set_settings(efx, ecmd);
1599 if (rc) {
1600 EFX_ERR(efx, "could not restore PHY settings\n");
1601 goto fail2;
1602 }
1603
1604 return 0;
1605
1606 fail2:
1607 efx_fini_channels(efx);
1608 fail1:
1609 return rc;
1610}
1611
1612/* Reset the NIC as transparently as possible. Do not reset the PHY.
1613 * Note that the reset may fail, in which case the card will be left
1614 * in a most-probably-unusable state.
1615 *
1616 * This function will sleep. You cannot reset from within an atomic
1617 * state; use efx_schedule_reset() instead.
1618 *
1619 * Grabs the rtnl_lock.
1620 */
1621static int efx_reset(struct efx_nic *efx)
1622{
1623 struct ethtool_cmd ecmd;
1624 enum reset_type method = efx->reset_pending;
1625 int rc;
1626
1627 /* Serialise with kernel interfaces */
1628 rtnl_lock();
1629
1630 /* If we're not RUNNING then don't reset. Leave the reset_pending
1631 * flag set so that efx_pci_probe_main will be retried */
1632 if (efx->state != STATE_RUNNING) {
1633 EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
1634 goto unlock_rtnl;
1635 }
1636
1637 efx->state = STATE_RESETTING;
1638 EFX_INFO(efx, "resetting (%d)\n", method);
1639
1640 /* The net_dev->get_stats handler is quite slow, and will fail
1641 * if a fetch is pending over reset. Serialise against it. */
1642 spin_lock(&efx->stats_lock);
1643 spin_unlock(&efx->stats_lock);
1644
1645 efx_stop_all(efx);
1646 mutex_lock(&efx->mac_lock);
1647
1648 rc = efx_reset_down(efx, &ecmd);
1649 if (rc)
1650 goto fail1;
1651
1652 rc = falcon_reset_hw(efx, method);
1653 if (rc) {
1654 EFX_ERR(efx, "failed to reset hardware\n");
1655 goto fail2;
1656 }
1657
1658 /* Allow resets to be rescheduled. */
1659 efx->reset_pending = RESET_TYPE_NONE;
1660
1661 /* Reinitialise bus-mastering, which may have been turned off before
1662 * the reset was scheduled. This is still appropriate, even in the
1663 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
1664 * can respond to requests. */
1665 pci_set_master(efx->pci_dev);
1666
1667 /* Reinitialise device. This is appropriate in the RESET_TYPE_DISABLE
1668 * case so the driver can talk to external SRAM */
1669 rc = falcon_init_nic(efx);
1670 if (rc) {
1671 EFX_ERR(efx, "failed to initialise NIC\n");
1672 goto fail3;
1673 }
1674
1675 /* Leave device stopped if necessary */
1676 if (method == RESET_TYPE_DISABLE) {
1677 /* Reinitialise the device anyway so the driver unload sequence
1678 * can talk to the external SRAM */
1679		falcon_init_nic(efx);
1680		rc = -EIO;
1681 goto fail4;
1682 }
1683
1684 rc = efx_reset_up(efx, &ecmd);
1685 if (rc)
1686 goto fail5;
1687
1688 mutex_unlock(&efx->mac_lock);
1689 EFX_LOG(efx, "reset complete\n");
1690
1691 efx->state = STATE_RUNNING;
1692 efx_start_all(efx);
1693
1694 unlock_rtnl:
1695 rtnl_unlock();
1696 return 0;
1697
1698 fail5:
1699 fail4:
1700 fail3:
1701 fail2:
1702 fail1:
1703 EFX_ERR(efx, "has been disabled\n");
1704 efx->state = STATE_DISABLED;
1705
1706 mutex_unlock(&efx->mac_lock);
1707 rtnl_unlock();
1708 efx_unregister_netdev(efx);
1709 efx_fini_port(efx);
1710 return rc;
1711}
1712
1713/* The worker thread exists so that code that cannot sleep can
1714 * schedule a reset for later.
1715 */
1716static void efx_reset_work(struct work_struct *data)
1717{
1718 struct efx_nic *nic = container_of(data, struct efx_nic, reset_work);
1719
1720 efx_reset(nic);
1721}
1722
1723void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
1724{
1725 enum reset_type method;
1726
1727 if (efx->reset_pending != RESET_TYPE_NONE) {
1728 EFX_INFO(efx, "quenching already scheduled reset\n");
1729 return;
1730 }
1731
1732 switch (type) {
1733 case RESET_TYPE_INVISIBLE:
1734 case RESET_TYPE_ALL:
1735 case RESET_TYPE_WORLD:
1736 case RESET_TYPE_DISABLE:
1737 method = type;
1738 break;
1739 case RESET_TYPE_RX_RECOVERY:
1740 case RESET_TYPE_RX_DESC_FETCH:
1741 case RESET_TYPE_TX_DESC_FETCH:
1742 case RESET_TYPE_TX_SKIP:
1743 method = RESET_TYPE_INVISIBLE;
1744 break;
1745 default:
1746 method = RESET_TYPE_ALL;
1747 break;
1748 }
1749
1750 if (method != type)
1751 EFX_LOG(efx, "scheduling reset (%d:%d)\n", type, method);
1752 else
1753 EFX_LOG(efx, "scheduling reset (%d)\n", method);
1754
1755 efx->reset_pending = method;
1756
1757 queue_work(efx->workqueue, &efx->reset_work);
1758}
1759
1760/**************************************************************************
1761 *
1762 * List of NICs we support
1763 *
1764 **************************************************************************/
1765
1766/* PCI device ID table */
1767static struct pci_device_id efx_pci_table[] __devinitdata = {
1768 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
1769 .driver_data = (unsigned long) &falcon_a_nic_type},
1770 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
1771 .driver_data = (unsigned long) &falcon_b_nic_type},
1772 {0} /* end of list */
1773};
1774
1775/**************************************************************************
1776 *
1777 * Dummy PHY/MAC/Board operations
1778 *
1779 * Can be used where the MAC does not implement this operation.
1780 * Needed so all function pointers are valid and do not have to be tested
1781 * before use.
1782 *
1783 **************************************************************************/
1784int efx_port_dummy_op_int(struct efx_nic *efx)
1785{
1786 return 0;
1787}
1788void efx_port_dummy_op_void(struct efx_nic *efx) {}
1789void efx_port_dummy_op_blink(struct efx_nic *efx, int blink) {}
1790
1791static struct efx_phy_operations efx_dummy_phy_operations = {
1792 .init = efx_port_dummy_op_int,
1793 .reconfigure = efx_port_dummy_op_void,
1794 .check_hw = efx_port_dummy_op_int,
1795 .fini = efx_port_dummy_op_void,
1796 .clear_interrupt = efx_port_dummy_op_void,
1797 .reset_xaui = efx_port_dummy_op_void,
1798};
1799
1800/* Dummy board operations */
1801static int efx_nic_dummy_op_int(struct efx_nic *nic)
1802{
1803 return 0;
1804}
1805
1806static struct efx_board efx_dummy_board_info = {
1807 .init = efx_nic_dummy_op_int,
1808 .init_leds = efx_port_dummy_op_int,
1809 .set_fault_led = efx_port_dummy_op_blink,
1810};
1811
1812/**************************************************************************
1813 *
1814 * Data housekeeping
1815 *
1816 **************************************************************************/
1817
1818/* This zeroes out and then fills in the invariants in a struct
1819 * efx_nic (including all sub-structures).
1820 */
1821static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
1822 struct pci_dev *pci_dev, struct net_device *net_dev)
1823{
1824 struct efx_channel *channel;
1825 struct efx_tx_queue *tx_queue;
1826 struct efx_rx_queue *rx_queue;
1827 int i, rc;
1828
1829 /* Initialise common structures */
1830 memset(efx, 0, sizeof(*efx));
1831 spin_lock_init(&efx->biu_lock);
1832 spin_lock_init(&efx->phy_lock);
1833 INIT_WORK(&efx->reset_work, efx_reset_work);
1834 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
1835 efx->pci_dev = pci_dev;
1836 efx->state = STATE_INIT;
1837 efx->reset_pending = RESET_TYPE_NONE;
1838 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
1839 efx->board_info = efx_dummy_board_info;
1840
1841 efx->net_dev = net_dev;
1842 efx->rx_checksum_enabled = 1;
1843 spin_lock_init(&efx->netif_stop_lock);
1844 spin_lock_init(&efx->stats_lock);
1845 mutex_init(&efx->mac_lock);
1846 efx->phy_op = &efx_dummy_phy_operations;
1847 efx->mii.dev = net_dev;
1848 INIT_WORK(&efx->reconfigure_work, efx_reconfigure_work);
1849 atomic_set(&efx->netif_stop_count, 1);
1850
1851 for (i = 0; i < EFX_MAX_CHANNELS; i++) {
1852 channel = &efx->channel[i];
1853 channel->efx = efx;
1854 channel->channel = i;
1855 channel->evqnum = i;
1856 channel->work_pending = 0;
1857 }
1858 for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
1859 tx_queue = &efx->tx_queue[i];
1860 tx_queue->efx = efx;
1861 tx_queue->queue = i;
1862 tx_queue->buffer = NULL;
1863 tx_queue->channel = &efx->channel[0]; /* for safety */
1864 tx_queue->tso_headers_free = NULL;
1865 }
1866 for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
1867 rx_queue = &efx->rx_queue[i];
1868 rx_queue->efx = efx;
1869 rx_queue->queue = i;
1870 rx_queue->channel = &efx->channel[0]; /* for safety */
1871 rx_queue->buffer = NULL;
1872 spin_lock_init(&rx_queue->add_lock);
1873 INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
1874 }
1875
1876 efx->type = type;
1877
1878 /* Sanity-check NIC type: ring masks must be of the form 2^n - 1 and evq_size a power of two */
1879 EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
1880 (efx->type->txd_ring_mask + 1));
1881 EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
1882 (efx->type->rxd_ring_mask + 1));
1883 EFX_BUG_ON_PARANOID(efx->type->evq_size &
1884 (efx->type->evq_size - 1));
1885 /* The event queue must hold at least one event per TX and RX descriptor: as close as we can get to guaranteeing that it cannot overflow */
1886 EFX_BUG_ON_PARANOID(efx->type->evq_size <
1887 (efx->type->txd_ring_mask + 1 +
1888 efx->type->rxd_ring_mask + 1));
1889 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
1890
1891 /* Higher numbered interrupt modes are less capable!  Taking the maximum therefore picks the more restrictive of the NIC's limit and the requested mode */
1892 efx->interrupt_mode = max(efx->type->max_interrupt_mode,
1893 interrupt_mode);
1894
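/* Per-NIC workqueue; the reset work item queued by efx_schedule_reset()
 * runs here in process context. */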
1895 efx->workqueue = create_singlethread_workqueue("sfc_work");
1896 if (!efx->workqueue) {
1897 rc = -ENOMEM;
1898 goto fail1;
1899 }
1900
1901 return 0;
1902
1903 fail1:
1904 return rc;
1905}
1906
1907static void efx_fini_struct(struct efx_nic *efx)
1908{
1909 if (efx->workqueue) {
1910 destroy_workqueue(efx->workqueue);
1911 efx->workqueue = NULL;
1912 }
1913}
1914
1915/**************************************************************************
1916 *
1917 * PCI interface
1918 *
1919 **************************************************************************/
1920
1921/* Main body of final NIC shutdown code
1922 * This is called only at module unload (or hotplug removal).
1923 */
1924static void efx_pci_remove_main(struct efx_nic *efx)
1925{
1926 EFX_ASSERT_RESET_SERIALISED(efx);
1927
1928 /* Skip everything if we never obtained a valid membase */
1929 if (!efx->membase)
1930 return;
1931
1932 efx_fini_channels(efx);
1933 efx_fini_port(efx);
1934
1935 /* Shut down interrupts, then the remaining NIC and board state */
1936 falcon_fini_interrupt(efx);
1937
1938 efx_fini_napi(efx);
1939 efx_remove_all(efx);
1940}
1941
1942/* Final NIC shutdown
1943 * This is called only at module unload (or hotplug removal).
1944 */
1945static void efx_pci_remove(struct pci_dev *pci_dev)
1946{
1947 struct efx_nic *efx;
1948
1949 efx = pci_get_drvdata(pci_dev);
1950 if (!efx)
1951 return;
1952
1953 /* Mark the NIC as fini, then stop the interface */
1954 rtnl_lock();
1955 efx->state = STATE_FINI;
1956 dev_close(efx->net_dev);
1957
1958 /* Allow any queued efx_resets() to complete */
1959 rtnl_unlock();
1960
1961 if (efx->membase == NULL)
1962 goto out;
1963
1964 efx_unregister_netdev(efx);
1965
1966 /* Wait for any scheduled resets to complete. No more will be
1967 * scheduled from this point because efx_stop_all() has been
1968 * called, we are no longer registered with driverlink, and
1969 * the net_device has been removed. */
1970 flush_workqueue(efx->workqueue);
1971
1972 efx_pci_remove_main(efx);
1973
1974out:
1975 efx_fini_io(efx);
1976 EFX_LOG(efx, "shutdown successful\n");
1977
1978 pci_set_drvdata(pci_dev, NULL);
1979 efx_fini_struct(efx);
1980 free_netdev(efx->net_dev);
1981}
1982
1983/* Main body of NIC initialisation
1984 * This is called at module load (or hotplug insertion, theoretically).
1985 */
1986static int efx_pci_probe_main(struct efx_nic *efx)
1987{
1988 int rc;
1989
1990 /* Do start-of-day initialisation */
1991 rc = efx_probe_all(efx);
1992 if (rc)
1993 goto fail1;
1994
1995 rc = efx_init_napi(efx);
1996 if (rc)
1997 goto fail2;
1998
1999 /* Initialise the board */
2000 rc = efx->board_info.init(efx);
2001 if (rc) {
2002 EFX_ERR(efx, "failed to initialise board\n");
2003 goto fail3;
2004 }
2005
2006 rc = falcon_init_nic(efx);
2007 if (rc) {
2008 EFX_ERR(efx, "failed to initialise NIC\n");
2009 goto fail4;
2010 }
2011
2012 rc = efx_init_port(efx);
2013 if (rc) {
2014 EFX_ERR(efx, "failed to initialise port\n");
2015 goto fail5;
2016 }
2017
2018 rc = efx_init_channels(efx);
2019 if (rc)
2020 goto fail6;
2021
2022 rc = falcon_init_interrupt(efx);
2023 if (rc)
2024 goto fail7;
2025
2026 return 0;
2027
2028 fail7:
2029 efx_fini_channels(efx);
2030 fail6:
2031 efx_fini_port(efx);
2032 fail5:
2033 fail4:
2034 fail3:
2035 efx_fini_napi(efx);
2036 fail2:
2037 efx_remove_all(efx);
2038 fail1:
2039 return rc;
2040}
2041
2042/* NIC initialisation
2043 *
2044 * This is called at module load (or hotplug insertion,
2045 * theoretically). It sets up PCI mappings, tests and resets the NIC,
2046 * sets up and registers the network devices with the kernel and hooks
2047 * the interrupt service routine. It does not prepare the device for
2048 * transmission; this is left to the first time one of the network
2049 * interfaces is brought up (i.e. efx_net_open).
2050 */
2051static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2052 const struct pci_device_id *entry)
2053{
2054 struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
2055 struct net_device *net_dev;
2056 struct efx_nic *efx;
2057 int i, rc;
2058
2059 /* Allocate and initialise a struct net_device and struct efx_nic */
2060 net_dev = alloc_etherdev(sizeof(*efx));
2061 if (!net_dev)
2062 return -ENOMEM;
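/* Advertise checksum offload, scatter/gather, high-DMA and TSO for
 * every device; LRO is optional and follows the 'lro' module
 * parameter. */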
2063 net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
2064 NETIF_F_HIGHDMA | NETIF_F_TSO);
2065 if (lro)
2066 net_dev->features |= NETIF_F_LRO;
2067 efx = net_dev->priv;
2068 pci_set_drvdata(pci_dev, efx);
2069 rc = efx_init_struct(efx, type, pci_dev, net_dev);
2070 if (rc)
2071 goto fail1;
2072
2073 EFX_INFO(efx, "Solarflare Communications NIC detected\n");
2074
2075 /* Set up basic I/O (BAR mappings etc) */
2076 rc = efx_init_io(efx);
2077 if (rc)
2078 goto fail2;
2079
2080 /* No serialisation is required with the reset path because
2081 * we're in STATE_INIT. */
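/* Try the start-of-day initialisation up to five times.  A failed
 * attempt is retried only if it scheduled a recoverable (INVISIBLE or
 * ALL) reset, which is cleared before the next attempt. */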
2082 for (i = 0; i < 5; i++) {
2083 rc = efx_pci_probe_main(efx);
2084 if (rc == 0)
2085 break;
2086
2087 /* Serialise against efx_reset(). No more resets will be
2088 * scheduled since efx_stop_all() has been called, and we
2089 * have not and never have been registered with either
2090 * the rtnetlink or driverlink layers. */
2091 cancel_work_sync(&efx->reset_work);
2092
2093 /* Retry only if a recoverable reset event has been scheduled */
2094 if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
2095 (efx->reset_pending != RESET_TYPE_ALL))
2096 goto fail3;
2097
2098 efx->reset_pending = RESET_TYPE_NONE;
2099 }
2100
2101 if (rc) {
2102 EFX_ERR(efx, "Could not reset NIC\n");
2103 goto fail4;
2104 }
2105
2106 /* Switch to the running state before we expose the device to
2107 * the OS. This is to ensure that the initial gathering of
2108 * MAC stats succeeds. */
2109 rtnl_lock();
2110 efx->state = STATE_RUNNING;
2111 rtnl_unlock();
2112
2113 rc = efx_register_netdev(efx);
2114 if (rc)
2115 goto fail5;
2116
2117 EFX_LOG(efx, "initialisation successful\n");
2118
2119 return 0;
2120
2121 fail5:
2122 efx_pci_remove_main(efx);
2123 fail4:
2124 fail3:
2125 efx_fini_io(efx);
2126 fail2:
2127 efx_fini_struct(efx);
2128 fail1:
2129 EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
2130 free_netdev(net_dev);
2131 return rc;
2132}
2133
2134static struct pci_driver efx_pci_driver = {
2135 .name = EFX_DRIVER_NAME,
2136 .id_table = efx_pci_table,
2137 .probe = efx_pci_probe,
2138 .remove = efx_pci_remove,
2139};
2140
2141/**************************************************************************
2142 *
2143 * Kernel module interface
2144 *
2145 *************************************************************************/
2146
2147module_param(interrupt_mode, uint, 0444);
2148MODULE_PARM_DESC(interrupt_mode,
2149 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
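/* Example (assuming the module is named "sfc", as suggested by the
 * "sfc_work"/"sfc_refill" workqueue names above):
 *
 *   modprobe sfc interrupt_mode=2
 *
 * forces the driver to use legacy line-based interrupts. */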
2150
2151static int __init efx_init_module(void)
2152{
2153 int rc;
2154
2155 printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
2156
2157 rc = register_netdevice_notifier(&efx_netdev_notifier);
2158 if (rc)
2159 goto err_notifier;
2160
2161 refill_workqueue = create_workqueue("sfc_refill");
2162 if (!refill_workqueue) {
2163 rc = -ENOMEM;
2164 goto err_refill;
2165 }
2166
2167 rc = pci_register_driver(&efx_pci_driver);
2168 if (rc < 0)
2169 goto err_pci;
2170
2171 return 0;
2172
2173 err_pci:
2174 destroy_workqueue(refill_workqueue);
2175 err_refill:
2176 unregister_netdevice_notifier(&efx_netdev_notifier);
2177 err_notifier:
2178 return rc;
2179}
2180
2181static void __exit efx_exit_module(void)
2182{
2183 printk(KERN_INFO "Solarflare NET driver unloading\n");
2184
2185 pci_unregister_driver(&efx_pci_driver);
2186 destroy_workqueue(refill_workqueue);
2187 unregister_netdevice_notifier(&efx_netdev_notifier);
2188
2189}
2190
2191module_init(efx_init_module);
2192module_exit(efx_exit_module);
2193
2194MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and "
2195 "Solarflare Communications");
2196MODULE_DESCRIPTION("Solarflare Communications network driver");
2197MODULE_LICENSE("GPL");
2198MODULE_DEVICE_TABLE(pci, efx_pci_table);