/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include "net_driver.h"
#include "gmii.h"
#include "ethtool.h"
#include "tx.h"
#include "rx.h"
#include "efx.h"
#include "mdio_10g.h"
#include "falcon.h"
#include "mac.h"

#define EFX_MAX_MTU (9 * 1024)

/* RX slow fill workqueue. If memory allocation fails in the fast path,
 * a work item is pushed onto this work queue to retry the allocation later,
 * to avoid the NIC being starved of RX buffers. Since this is a per-CPU
 * workqueue, there is nothing to be gained in making it per NIC.
 */
static struct workqueue_struct *refill_workqueue;

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Enable large receive offload (LRO) aka soft segment reassembly (SSR)
 *
 * This sets the default for new devices. It can be controlled later
 * using ethtool.
 */
static int lro = true;
module_param(lro, int, 0644);
MODULE_PARM_DESC(lro, "Large receive offload acceleration");
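
/* Illustrative usage (not part of the original source): the default can
 * be overridden at load time, e.g. "modprobe sfc lro=0", or written via
 * /sys/module/sfc/parameters/lro (mode 0644 above). As the comment above
 * notes, this only sets the default for newly probed devices; devices
 * already up are controlled with ethtool.
 */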

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us to
 * apply a higher level of interrupt moderation to TX events.
 *
 * This is forced to 0 for MSI interrupt mode, as the interrupt vector
 * is not written.
 */
static unsigned int separate_tx_and_rx_channels = true;

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor, which checks for known hardware bugs and resets the
 * hardware and driver as necessary.
 */
unsigned int efx_monitor_interval = 1 * HZ;

/* This controls whether or not the hardware monitor will trigger a
 * reset when it detects an error condition.
 */
static unsigned int monitor_reset = true;

/* This controls whether or not the driver will initialise devices
 * with invalid MAC addresses stored in the EEPROM or flash. If true,
 * such devices will be initialised with a random locally-generated
 * MAC address. This allows for loading the sfc_mtd driver to
 * reprogram the flash, even if the flash contents (including the MAC
 * address) have previously been erased.
 */
static unsigned int allow_bad_hwaddr;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;
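
/* Worked example of the 205 usec figure above (illustrative): a stopped
 * queue restarts after 512 descriptors drain, i.e. ~170 packets at the
 * worst-case 3 descriptors per packet; a 1500-byte frame occupies a 10G
 * link for 1500 * 8 / 10^10 s ~= 1.2 us, so draining takes roughly
 * 170 * 1.2 ~= 205 us. A moderation delay below that (150 us here) lets
 * the completion interrupt restart the queue before the link goes idle.
 */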

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each package (level II cache).
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
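
/* Illustrative usage (not part of the original source): "modprobe sfc
 * rss_cpus=4" requests four RX queues/interrupts, capped by the MSI-X
 * vectors actually granted; the read-only 0444 mode means it can only be
 * set at load time. With the default of 0 the count comes from
 * efx_wanted_rx_queues() below, one per CPU package.
 */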

/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_port(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_channels(struct efx_nic *efx);

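/* Once the NIC is RUNNING or RESETTING, reset paths are serialised by
 * the RTNL lock, so callers touching reset-sensitive state in those
 * states must hold it; the assertion below enforces that rule.
 */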
#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if ((efx->state == STATE_RUNNING) ||	\
		    (efx->state == STATE_RESETTING))	\
			ASSERT_RTNL();			\
	} while (0)

/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int rx_quota)
{
	struct efx_nic *efx = channel->efx;
	int rx_packets;

	if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
		     !channel->enabled))
		return 0;

	rx_packets = falcon_process_eventq(channel, rx_quota);
	if (rx_packets == 0)
		return 0;

	/* Deliver last RX packet. */
	if (channel->rx_pkt) {
		__efx_rx_packet(channel, channel->rx_pkt,
				channel->rx_pkt_csummed);
		channel->rx_pkt = NULL;
	}

	efx_flush_lro(channel);
	efx_rx_strategy(channel);

	efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);

	return rx_packets;
}

/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
	/* The interrupt handler for this channel may set work_pending
	 * as soon as we acknowledge the events we've seen. Make sure
	 * it's cleared before then. */
	channel->work_pending = false;
	smp_wmb();

	falcon_eventq_read_ack(channel);
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct net_device *napi_dev = channel->napi_dev;
	int rx_packets;

	EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
		  channel->channel, raw_smp_processor_id());

	rx_packets = efx_process_channel(channel, budget);

	if (rx_packets < budget) {
		/* There is no race here; although napi_disable() will
		 * only wait for netif_rx_complete(), this isn't a problem
		 * since efx_channel_processed() will have no effect if
		 * interrupts have already been disabled.
		 */
		netif_rx_complete(napi_dev, napi);
		efx_channel_processed(channel);
	}

	return rx_packets;
}

/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack) the eventq.
 * Finally reenable NAPI and interrupts.
 *
 * Since we are touching interrupts, the caller should hold the suspend lock
 */
void efx_process_channel_now(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	BUG_ON(!channel->used_flags);
	BUG_ON(!channel->enabled);

	/* Disable interrupts and wait for ISRs to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	if (channel->irq)
		synchronize_irq(channel->irq);

	/* Wait for any NAPI processing to complete */
	napi_disable(&channel->napi_str);

	/* Poll the channel */
	efx_process_channel(channel, efx->type->evq_size);

	/* Ack the eventq. This may cause an interrupt to be generated
	 * when interrupts are reenabled */
	efx_channel_processed(channel);

	napi_enable(&channel->napi_str);
	falcon_enable_interrupts(efx);
}

/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);

	return falcon_probe_eventq(channel);
}

/* Prepare channel's event queue */
static int efx_init_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);

	channel->eventq_read_ptr = 0;

	return falcon_init_eventq(channel);
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);

	falcon_fini_eventq(channel);
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);

	falcon_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail1;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail2;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail3;
	}

	channel->n_rx_frm_trunc = 0;

	return 0;

 fail3:
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
 fail2:
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
 fail1:
	return rc;
}

/* Channels are shut down and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static int efx_init_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	int rc = 0;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
			      EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			      efx->type->rx_buffer_padding);
	efx->rx_buffer_order = get_order(efx->rx_buffer_len);
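	/* Rough worked example (illustrative; exact numbers depend on
	 * EFX_MAX_FRAME_LEN and the NIC's rx_buffer_padding): a 9000-byte
	 * MTU yields a buffer length a little over 9 KB, for which
	 * get_order() returns 2 on 4 KB pages, i.e. a 16 KB allocation.
	 */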

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "init chan %d\n", channel->channel);

		rc = efx_init_eventq(channel);
		if (rc)
			goto err;

		efx_for_each_channel_tx_queue(tx_queue, channel) {
			rc = efx_init_tx_queue(tx_queue);
			if (rc)
				goto err;
		}

		/* The rx buffer allocation strategy is MTU dependent */
		efx_rx_strategy(channel);

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			rc = efx_init_rx_queue(rx_queue);
			if (rc)
				goto err;
		}

		WARN_ON(channel->rx_pkt != NULL);
		efx_rx_strategy(channel);
	}

	return 0;

 err:
	EFX_ERR(efx, "failed to initialise channel %d\n",
		channel ? channel->channel : -1);
	efx_fini_channels(efx);
	return rc;
}

/* This enables event queue processing and packet transmission.
 *
 * Note that this function is not allowed to fail, since that would
 * introduce too much complexity into the suspend/resume path.
 */
static void efx_start_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);

	if (!(channel->efx->net_dev->flags & IFF_UP))
		netif_napi_add(channel->napi_dev, &channel->napi_str,
			       efx_poll, napi_weight);

	/* The interrupt handler for this channel may set work_pending
	 * as soon as we enable it. Make sure it's cleared before
	 * then. Similarly, make sure it sees the enabled flag set. */
	channel->work_pending = false;
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);

	/* Load up RX descriptors */
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_fast_push_rx_descriptors(rx_queue);
}

/* This disables event queue processing and packet transmission.
 * This function does not guarantee that all queue processing
 * (e.g. RX refill) is complete.
 */
static void efx_stop_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	if (!channel->enabled)
		return;

	EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);

	channel->enabled = false;
	napi_disable(&channel->napi_str);

	/* Ensure that any worker threads have exited or will be no-ops */
	efx_for_each_channel_rx_queue(rx_queue, channel) {
		spin_lock_bh(&rx_queue->add_lock);
		spin_unlock_bh(&rx_queue->add_lock);
	}
}

static void efx_fini_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
	}

	/* Do the event queues last so that we can handle flush events
	 * for all DMA queues. */
	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "shut down evq %d\n", channel->channel);

		efx_fini_eventq(channel);
	}
}

static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);

	channel->used_flags = 0;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
{
	queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
}

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and also maintains the
 * link-status-driven stop on the port's TX queue.
 */
static void efx_link_status_changed(struct efx_nic *efx)
{
	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (efx->link_up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (efx->link_up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (efx->link_up) {
		struct mii_if_info *gmii = &efx->mii;
		unsigned adv, lpa;
		/* NONE here means direct XAUI from the controller, with no
		 * MDIO-attached device we can query. */
		if (efx->phy_type != PHY_TYPE_NONE) {
			adv = gmii_advertised(gmii);
			lpa = gmii_lpa(gmii);
		} else {
			lpa = GM_LPA_10000 | LPA_DUPLEX;
			adv = lpa;
		}
		EFX_INFO(efx, "link up at %dMbps %s-duplex "
			 "(adv %04x lpa %04x) (MTU %d)%s\n",
			 (efx->link_options & GM_LPA_10000 ? 10000 :
			  (efx->link_options & GM_LPA_1000 ? 1000 :
			   (efx->link_options & GM_LPA_100 ? 100 :
			    10))),
			 (efx->link_options & GM_LPA_DUPLEX ?
			  "full" : "half"),
			 adv, lpa,
			 efx->net_dev->mtu,
			 (efx->promiscuous ? " [PROMISC]" : ""));
	} else {
		EFX_INFO(efx, "link down\n");
	}
}

/* This call reinitialises the MAC to pick up new PHY settings. The
 * caller must hold the mac_lock */
static void __efx_reconfigure_port(struct efx_nic *efx)
{
	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
		raw_smp_processor_id());

	falcon_reconfigure_xmac(efx);

	/* Inform kernel of loss/gain of carrier */
	efx_link_status_changed(efx);
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
void efx_reconfigure_port(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}

/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
 * we don't efx_reconfigure_port() if the port is disabled. Care is taken
 * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
static void efx_reconfigure_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   reconfigure_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}

static int efx_probe_port(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "create port\n");

	/* Connect up MAC/PHY operations table and read MAC address */
	rc = falcon_probe_port(efx);
	if (rc)
		goto err;

	/* Sanity check MAC address */
	if (is_valid_ether_addr(efx->mac_address)) {
		memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
	} else {
		DECLARE_MAC_BUF(mac);

		EFX_ERR(efx, "invalid MAC address %s\n",
			print_mac(mac, efx->mac_address));
		if (!allow_bad_hwaddr) {
			rc = -EINVAL;
			goto err;
		}
		random_ether_addr(efx->net_dev->dev_addr);
		EFX_INFO(efx, "using locally-generated MAC %s\n",
			 print_mac(mac, efx->net_dev->dev_addr));
	}

	return 0;

 err:
	efx_remove_port(efx);
	return rc;
}

static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "init port\n");

	/* Initialise the MAC and PHY */
	rc = falcon_init_xmac(efx);
	if (rc)
		return rc;

	efx->port_initialized = true;

	/* Reconfigure port to program MAC registers */
	falcon_reconfigure_xmac(efx);

	return 0;
}

/* Allow efx_reconfigure_port() to be scheduled, and close the window
 * between efx_stop_port and efx_flush_all whereby a previously scheduled
 * efx_reconfigure_port() may have been cancelled */
static void efx_start_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}

/* Prevent efx_reconfigure_work and efx_monitor() from executing, and
 * efx_set_multicast_list() from scheduling efx_reconfigure_work.
 * efx_reconfigure_work can still be scheduled via NAPI processing
 * until efx_flush_all() is called */
static void efx_stop_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "stop port\n");

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}
}

static void efx_fini_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "shut down port\n");

	if (!efx->port_initialized)
		return;

	falcon_fini_xmac(efx);
	efx->port_initialized = false;

	efx->link_up = false;
	efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying port\n");

	falcon_remove_port(efx);
}

/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	int rc;

	EFX_LOG(efx, "initialising I/O\n");

	rc = pci_enable_device(pci_dev);
	if (rc) {
		EFX_ERR(efx, "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask. Try all possibilities from our
	 * genuine mask down to 32 bits, because some architectures
	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		if (pci_dma_supported(pci_dev, dma_mask) &&
		    ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
			break;
		dma_mask >>= 1;
	}
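	/* Walk-through of the loop above (illustrative): the genuine
	 * 46-bit mask is tried first, then 45 bits, 44 bits and so on;
	 * the smallest candidate tried is the 32-bit mask 0xffffffff,
	 * after which the shift drops below the 0x7fffffff floor and
	 * the loop gives up.
	 */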
	if (rc) {
		EFX_ERR(efx, "could not find a suitable DMA mask\n");
		goto fail2;
	}
	EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);
	rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
	if (rc) {
		/* pci_set_consistent_dma_mask() is not *allowed* to
		 * fail with a mask that pci_set_dma_mask() accepted,
		 * but just in case...
		 */
		EFX_ERR(efx, "failed to set consistent DMA mask\n");
		goto fail2;
	}

	efx->membase_phys = pci_resource_start(efx->pci_dev,
					       efx->type->mem_bar);
	rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
	if (rc) {
		EFX_ERR(efx, "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys,
				       efx->type->mem_map_size);
	if (!efx->membase) {
		EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n",
			efx->type->mem_bar,
			(unsigned long long)efx->membase_phys,
			efx->type->mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n",
		efx->type->mem_bar, (unsigned long long)efx->membase_phys,
		efx->type->mem_map_size, efx->membase);

	return 0;

 fail4:
	release_mem_region(efx->membase_phys, efx->type->mem_map_size);
 fail3:
	efx->membase_phys = 0;
 fail2:
	pci_disable_device(efx->pci_dev);
 fail1:
	return rc;
}

static void efx_fini_io(struct efx_nic *efx)
{
	EFX_LOG(efx, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		pci_release_region(efx->pci_dev, efx->type->mem_bar);
		efx->membase_phys = 0;
	}

	pci_disable_device(efx->pci_dev);
}

/* Get number of RX queues wanted. Return number of online CPU
 * packages in the expectation that an IRQ balancer will spread
 * interrupts across them. */
static int efx_wanted_rx_queues(void)
{
	cpumask_t core_mask;
	int count;
	int cpu;

	cpus_clear(core_mask);
	count = 0;
	for_each_online_cpu(cpu) {
		if (!cpu_isset(cpu, core_mask)) {
			++count;
			cpus_or(core_mask, core_mask,
				topology_core_siblings(cpu));
		}
	}

	return count;
}
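
/* Worked example for the function above (illustrative): on a machine
 * with two packages of four cores each, the first CPU examined in each
 * package fails the cpu_isset() test, is counted, and folds all of that
 * package's siblings into core_mask; the remaining CPUs are skipped and
 * the function returns 2.
 */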

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static void efx_probe_interrupts(struct efx_nic *efx)
{
	int max_channels =
		min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
	int rc, i;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		int wanted_ints;

		/* We want one RX queue and interrupt per CPU package
		 * (or as specified by the rss_cpus module parameter).
		 * We will need one channel per interrupt.
		 */
		wanted_ints = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
		efx->n_rx_queues = min(wanted_ints, max_channels);

		for (i = 0; i < efx->n_rx_queues; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix(efx->pci_dev, xentries, efx->n_rx_queues);
		if (rc > 0) {
			EFX_BUG_ON_PARANOID(rc >= efx->n_rx_queues);
			efx->n_rx_queues = rc;
			rc = pci_enable_msix(efx->pci_dev, xentries,
					     efx->n_rx_queues);
		}

		if (rc == 0) {
			for (i = 0; i < efx->n_rx_queues; i++)
				efx->channel[i].irq = xentries[i].vector;
		} else {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EFX_INT_MODE_MSI;
			EFX_ERR(efx, "could not enable MSI-X\n");
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_rx_queues = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx->channel[0].irq = efx->pci_dev->irq;
		} else {
			EFX_ERR(efx, "could not enable MSI\n");
			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_rx_queues = 1;
		efx->legacy_irq = efx->pci_dev->irq;
	}
}

static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

static void efx_set_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	efx_for_each_tx_queue(tx_queue, efx) {
		if (!EFX_INT_MODE_USE_MSI(efx) && separate_tx_and_rx_channels)
			tx_queue->channel = &efx->channel[1];
		else
			tx_queue->channel = &efx->channel[0];
		tx_queue->channel->used_flags |= EFX_USED_BY_TX;
	}

	efx_for_each_rx_queue(rx_queue, efx) {
		rx_queue->channel = &efx->channel[rx_queue->queue];
		rx_queue->channel->used_flags |= EFX_USED_BY_RX;
	}
}

static int efx_probe_nic(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = falcon_probe_nic(efx);
	if (rc)
		return rc;

	/* Determine the number of channels and RX queues by trying to hook
	 * in MSI-X interrupts. */
	efx_probe_interrupts(efx);

	efx_set_channels(efx);

	/* Initialise the interrupt moderation settings */
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);

	return 0;
}

static void efx_remove_nic(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying NIC\n");

	efx_remove_interrupts(efx);
	falcon_remove_nic(efx);
}

/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

static int efx_probe_all(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Create NIC */
	rc = efx_probe_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create NIC\n");
		goto fail1;
	}

	/* Create port */
	rc = efx_probe_port(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create port\n");
		goto fail2;
	}

	/* Create channels */
	efx_for_each_channel(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			EFX_ERR(efx, "failed to create channel %d\n",
				channel->channel);
			goto fail3;
		}
	}

	return 0;

 fail3:
	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
	efx_remove_port(efx);
 fail2:
	efx_remove_nic(efx);
 fail1:
	return rc;
}

/* Called after previous invocation(s) of efx_stop_all, restarts the
 * port, kernel transmit queue, NAPI processing and hardware interrupts,
 * and ensures that the port is scheduled to be reconfigured.
 * This function is safe to call multiple times when the NIC is in any
 * state. */
static void efx_start_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled)
		return;
	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
		return;
	if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
		return;

	/* Mark the port as enabled so port reconfigurations can start, then
	 * restart the transmit interface early so the watchdog timer stops */
	efx_start_port(efx);
	efx_wake_queue(efx);

	efx_for_each_channel(channel, efx)
		efx_start_channel(channel);

	falcon_enable_interrupts(efx);

	/* Start hardware monitor if we're in RUNNING */
	if (efx->state == STATE_RUNNING)
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
}

/* Flush all delayed work. Should only be called when no more delayed work
 * will be scheduled. This doesn't flush pending online resets (efx_reset),
 * since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
	struct efx_rx_queue *rx_queue;

	/* Make sure the hardware monitor is stopped */
	cancel_delayed_work_sync(&efx->monitor_work);

	/* Ensure that all RX slow refills are complete. */
	efx_for_each_rx_queue(rx_queue, efx)
		cancel_delayed_work_sync(&rx_queue->work);

	/* Stop scheduled port reconfigurations */
	cancel_work_sync(&efx->reconfigure_work);
}

/* Quiesce hardware and software without bringing the link down.
 * Safe to call multiple times, when the nic and interface are in any
 * state. The caller is guaranteed to subsequently be in a position
 * to modify any hardware and software state they see fit without
 * taking locks. */
static void efx_stop_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	/* Disable interrupts and wait for ISR to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);
	}

	/* Stop all NAPI processing and synchronous rx refills */
	efx_for_each_channel(channel, efx)
		efx_stop_channel(channel);

	/* Stop all asynchronous port reconfigurations. Since all
	 * event processing has already been stopped, there is no
	 * window to lose phy events */
	efx_stop_port(efx);

	/* Flush reconfigure_work, refill_workqueue, monitor_work */
	efx_flush_all(efx);

	/* Isolate the MAC from the TX and RX engines, so that queue
	 * flushes will complete in a timely fashion. */
	falcon_deconfigure_mac_wrapper(efx);
	falcon_drain_tx_fifo(efx);

	/* Stop the kernel transmit interface late, so the watchdog
	 * timer isn't ticking over the flush */
	efx_stop_queue(efx);
	if (efx_dev_registered(efx)) {
		netif_tx_lock_bh(efx->net_dev);
		netif_tx_unlock_bh(efx->net_dev);
	}
}

static void efx_remove_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
	efx_remove_port(efx);
	efx_remove_nic(efx);
}

/* A convenience function to safely flush all the queues */
int efx_flush_queues(struct efx_nic *efx)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_stop_all(efx);

	efx_fini_channels(efx);
	rc = efx_init_channels(efx);
	if (rc) {
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
		return rc;
	}

	efx_start_all(efx);

	return 0;
}

/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/

/* Set interrupt moderation parameters */
void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_for_each_tx_queue(tx_queue, efx)
		tx_queue->channel->irq_moderation = tx_usecs;

	efx_for_each_rx_queue(rx_queue, efx)
		rx_queue->channel->irq_moderation = rx_usecs;
}

/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/

/* Run periodically off the general workqueue. Serialised against
 * efx_reconfigure_port via the mac_lock */
static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);
	int rc = 0;

	EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
		  raw_smp_processor_id());

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in place, which will likely do
	 * most of the work of check_hw() anyway. */
	if (!mutex_trylock(&efx->mac_lock)) {
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
		return;
	}

	if (efx->port_enabled)
		rc = falcon_check_xmac(efx);
	mutex_unlock(&efx->mac_lock);

	if (rc) {
		if (monitor_reset) {
			EFX_ERR(efx, "hardware monitor detected a fault: "
				"triggering reset\n");
			efx_schedule_reset(efx, RESET_TYPE_MONITOR);
		} else {
			EFX_ERR(efx, "hardware monitor detected a fault, "
				"skipping reset\n");
		}
	}

	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   efx_monitor_interval);
}

/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_ASSERT_RESET_SERIALISED(efx);

	return generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
}

/**************************************************************************
 *
 * NAPI interface
 *
 **************************************************************************/

static int efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	efx_for_each_channel(channel, efx) {
		channel->napi_dev = efx->net_dev;
		rc = efx_lro_init(&channel->lro_mgr, efx);
		if (rc)
			goto err;
	}
	return 0;
 err:
	efx_fini_napi(efx);
	return rc;
}

static void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		efx_lro_fini(&channel->lro_mgr);
		channel->napi_dev = NULL;
	}
}

/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

#endif

/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	EFX_ASSERT_RESET_SERIALISED(efx);

	EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());

	efx_start_all(efx);
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int efx_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());

	/* Stop the device and flush all the channels */
	efx_stop_all(efx);
	efx_fini_channels(efx);
	rc = efx_init_channels(efx);
	if (rc)
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);

	return 0;
}

/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_mac_stats *mac_stats = &efx->mac_stats;
	struct net_device_stats *stats = &net_dev->stats;

	/* Update stats if possible, but do not wait if another thread
	 * is updating them (or resetting the NIC); slightly stale
	 * stats are acceptable.
	 */
	if (!spin_trylock(&efx->stats_lock))
		return stats;
	if (efx->state == STATE_RUNNING) {
		falcon_update_stats_xmac(efx);
		falcon_update_nic_stats(efx);
	}
	spin_unlock(&efx->stats_lock);

	stats->rx_packets = mac_stats->rx_packets;
	stats->tx_packets = mac_stats->tx_packets;
	stats->rx_bytes = mac_stats->rx_bytes;
	stats->tx_bytes = mac_stats->tx_bytes;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_collision;
	stats->rx_length_errors = (mac_stats->rx_gtjumbo +
				   mac_stats->rx_length_error);
	stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
	stats->rx_crc_errors = mac_stats->rx_bad;
	stats->rx_frame_errors = mac_stats->rx_align_error;
	stats->rx_fifo_errors = mac_stats->rx_overflow;
	stats->rx_missed_errors = mac_stats->rx_missed;
	stats->tx_window_errors = mac_stats->tx_late_collision;

	stats->rx_errors = (stats->rx_length_errors +
			    stats->rx_over_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    stats->rx_fifo_errors +
			    stats->rx_missed_errors +
			    mac_stats->rx_symbol_error);
	stats->tx_errors = (stats->tx_window_errors +
			    mac_stats->tx_bad);

	return stats;
}

/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d: %s\n",
		atomic_read(&efx->netif_stop_count), efx->port_enabled,
		monitor_reset ? "resetting channels" : "skipping reset");

	if (monitor_reset)
		efx_schedule_reset(efx, RESET_TYPE_MONITOR);
}

/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc = 0;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (new_mtu > EFX_MAX_MTU)
		return -EINVAL;

	efx_stop_all(efx);

	EFX_LOG(efx, "changing MTU to %d\n", new_mtu);

	efx_fini_channels(efx);
	net_dev->mtu = new_mtu;
	rc = efx_init_channels(efx);
	if (rc)
		goto fail;

	efx_start_all(efx);
	return rc;

 fail:
	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	return rc;
}

static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct sockaddr *addr = data;
	char *new_addr = addr->sa_data;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (!is_valid_ether_addr(new_addr)) {
		DECLARE_MAC_BUF(mac);
		EFX_ERR(efx, "invalid ethernet MAC address requested: %s\n",
			print_mac(mac, new_addr));
		return -EINVAL;
	}

	memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);

	/* Reconfigure the MAC */
	efx_reconfigure_port(efx);

	return 0;
}

/* Context: netif_tx_lock held, BHs disabled. */
static void efx_set_multicast_list(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct dev_mc_list *mc_list = net_dev->mc_list;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	bool promiscuous;
	u32 crc;
	int bit;
	int i;

	/* Set per-MAC promiscuity flag and reconfigure MAC if necessary */
	promiscuous = !!(net_dev->flags & IFF_PROMISC);
	if (efx->promiscuous != promiscuous) {
		efx->promiscuous = promiscuous;
		/* Close the window between efx_stop_port() and efx_flush_all()
		 * by only queuing work when the port is enabled. */
		if (efx->port_enabled)
			queue_work(efx->workqueue, &efx->reconfigure_work);
	}

	/* Build multicast hash table */
	if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		for (i = 0; i < net_dev->mc_count; i++) {
			crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			set_bit_le(bit, mc_hash->byte);
			mc_list = mc_list->next;
		}
	}
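	/* How the filter behaves (descriptive): each address is reduced
	 * to the low-order bits of its little-endian CRC-32;
	 * EFX_MCAST_HASH_ENTRIES is a power of two, so the AND above is
	 * a cheap modulo selecting a single bit. Any address hashing to
	 * a set bit is accepted, so false positives are possible but
	 * misses are not.
	 */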

	/* Create and activate new global multicast hash table */
	falcon_set_multicast_hash(efx);
}

static int efx_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = ptr;

	if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
		struct efx_nic *efx = netdev_priv(net_dev);

		strcpy(efx->name, net_dev->name);
	}

	return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
	.notifier_call = efx_netdev_event,
};

static int efx_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->open = efx_net_open;
	net_dev->stop = efx_net_stop;
	net_dev->get_stats = efx_net_stats;
	net_dev->tx_timeout = &efx_watchdog;
	net_dev->hard_start_xmit = efx_hard_start_xmit;
	net_dev->do_ioctl = efx_ioctl;
	net_dev->change_mtu = efx_change_mtu;
	net_dev->set_mac_address = efx_set_mac_address;
	net_dev->set_multicast_list = efx_set_multicast_list;
#ifdef CONFIG_NET_POLL_CONTROLLER
	net_dev->poll_controller = efx_netpoll;
#endif
	SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(efx->net_dev);

	/* Clear MAC statistics */
	falcon_update_stats_xmac(efx);
	memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));

	rc = register_netdev(net_dev);
	if (rc) {
		EFX_ERR(efx, "could not register net dev\n");
		return rc;
	}
	strcpy(efx->name, net_dev->name);

	return 0;
}

static void efx_unregister_netdev(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;

	if (!efx->net_dev)
		return;

	BUG_ON(netdev_priv(efx->net_dev) != efx);

	/* Free up any skbs still remaining. This has to happen before
	 * we try to unregister the netdev, as their destructors may need
	 * to run to bring the device reference count to 0. */
	efx_for_each_tx_queue(tx_queue, efx)
		efx_release_tx_buffers(tx_queue);

	if (efx_dev_registered(efx)) {
		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
		unregister_netdev(efx->net_dev);
	}
}

/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/

/* The final hardware and software finalisation before reset. */
static int efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	rc = falcon_xmac_get_settings(efx, ecmd);
	if (rc) {
		EFX_ERR(efx, "could not back up PHY settings\n");
		goto fail;
	}

	efx_fini_channels(efx);
	return 0;

 fail:
	return rc;
}

/* The first part of software initialisation after a hardware reset
 * This function does not handle serialisation with the kernel, it
 * assumes the caller has done this */
static int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
	int rc;

	rc = efx_init_channels(efx);
	if (rc)
		goto fail1;

	/* Restore MAC and PHY settings. */
	rc = falcon_xmac_set_settings(efx, ecmd);
	if (rc) {
		EFX_ERR(efx, "could not restore PHY settings\n");
		goto fail2;
	}

	return 0;

 fail2:
	efx_fini_channels(efx);
 fail1:
	return rc;
}

/* Reset the NIC as transparently as possible. Do not reset the PHY.
 * Note that the reset may fail, in which case the card will be left
 * in a most-probably-unusable state.
 *
 * This function will sleep. You cannot reset from within an atomic
 * state; use efx_schedule_reset() instead.
 *
 * Grabs the rtnl_lock.
 */
static int efx_reset(struct efx_nic *efx)
{
	struct ethtool_cmd ecmd;
	enum reset_type method = efx->reset_pending;
	int rc;

	/* Serialise with kernel interfaces */
	rtnl_lock();

	/* If we're not RUNNING then don't reset. Leave the reset_pending
	 * flag set so that efx_pci_probe_main will be retried */
	if (efx->state != STATE_RUNNING) {
		EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
		goto unlock_rtnl;
	}

	efx->state = STATE_RESETTING;
	EFX_INFO(efx, "resetting (%d)\n", method);

	/* The net_dev->get_stats handler is quite slow, and will fail
	 * if a fetch is pending over reset. Serialise against it. */
	spin_lock(&efx->stats_lock);
	spin_unlock(&efx->stats_lock);

	efx_stop_all(efx);
	mutex_lock(&efx->mac_lock);

	rc = efx_reset_down(efx, &ecmd);
	if (rc)
		goto fail1;

	rc = falcon_reset_hw(efx, method);
	if (rc) {
		EFX_ERR(efx, "failed to reset hardware\n");
		goto fail2;
	}

	/* Allow resets to be rescheduled. */
	efx->reset_pending = RESET_TYPE_NONE;

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
	 * can respond to requests. */
	pci_set_master(efx->pci_dev);

	/* Reinitialise device. This is appropriate in the RESET_TYPE_DISABLE
	 * case so the driver can talk to external SRAM */
	rc = falcon_init_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise NIC\n");
		goto fail3;
	}

	/* Leave device stopped if necessary */
	if (method == RESET_TYPE_DISABLE) {
		/* Reinitialise the device anyway so the driver unload sequence
		 * can talk to the external SRAM */
		falcon_init_nic(efx);
		rc = -EIO;
		goto fail4;
	}

	rc = efx_reset_up(efx, &ecmd);
	if (rc)
		goto fail5;

	mutex_unlock(&efx->mac_lock);
	EFX_LOG(efx, "reset complete\n");

	efx->state = STATE_RUNNING;
	efx_start_all(efx);

 unlock_rtnl:
	rtnl_unlock();
	return 0;

 fail5:
 fail4:
 fail3:
 fail2:
 fail1:
	EFX_ERR(efx, "has been disabled\n");
	efx->state = STATE_DISABLED;

	mutex_unlock(&efx->mac_lock);
	rtnl_unlock();
	efx_unregister_netdev(efx);
	efx_fini_port(efx);
	return rc;
}

/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
	struct efx_nic *nic = container_of(data, struct efx_nic, reset_work);

	efx_reset(nic);
}
1716
1717void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
1718{
1719 enum reset_type method;
1720
1721 if (efx->reset_pending != RESET_TYPE_NONE) {
1722 EFX_INFO(efx, "reset already pending; new request quenched\n");
1723 return;
1724 }
1725
1726 switch (type) {
1727 case RESET_TYPE_INVISIBLE:
1728 case RESET_TYPE_ALL:
1729 case RESET_TYPE_WORLD:
1730 case RESET_TYPE_DISABLE:
1731 method = type;
1732 break;
1733 case RESET_TYPE_RX_RECOVERY:
1734 case RESET_TYPE_RX_DESC_FETCH:
1735 case RESET_TYPE_TX_DESC_FETCH:
1736 case RESET_TYPE_TX_SKIP:
1737 method = RESET_TYPE_INVISIBLE;
1738 break;
1739 default:
1740 method = RESET_TYPE_ALL;
1741 break;
1742 }
1743
1744 if (method != type)
1745 EFX_LOG(efx, "scheduling reset (%d:%d)\n", type, method);
1746 else
1747 EFX_LOG(efx, "scheduling reset (%d)\n", method);
1748
1749 efx->reset_pending = method;
1750
1751 queue_work(efx->reset_workqueue, &efx->reset_work);
1752}
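/* Illustrative sketch only, not part of the driver: code running in
 * atomic context (e.g. an interrupt handler) must not call efx_reset()
 * directly, since it sleeps; it queues a reset instead.  The function
 * name here is hypothetical.
 */
#if 0
static void example_handle_fatal_error(struct efx_nic *efx)
{
	/* Safe in atomic context: this only records the request and
	 * queues efx_reset_work() on efx->reset_workqueue */
	efx_schedule_reset(efx, RESET_TYPE_ALL);
}
#endif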
1753
1754/**************************************************************************
1755 *
1756 * List of NICs we support
1757 *
1758 **************************************************************************/
1759
1760/* PCI device ID table */
1761static struct pci_device_id efx_pci_table[] __devinitdata = {
1762 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
1763 .driver_data = (unsigned long) &falcon_a_nic_type},
1764 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
1765 .driver_data = (unsigned long) &falcon_b_nic_type},
1766 {0} /* end of list */
1767};
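/* Each entry's .driver_data carries the matching struct efx_nic_type;
 * efx_pci_probe() casts it back out of entry->driver_data. */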
1768
1769/**************************************************************************
1770 *
1771 * Dummy PHY/MAC/Board operations
1772 *
1773 * Can be used where the PHY, MAC or board does not implement an
1774 * operation.  Needed so that all function pointers are valid and
1775 * do not have to be tested before use.
1776 *
1777 **************************************************************************/
1778int efx_port_dummy_op_int(struct efx_nic *efx)
1779{
1780 return 0;
1781}
1782void efx_port_dummy_op_void(struct efx_nic *efx) {}
1783void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink) {}
1784
1785static struct efx_phy_operations efx_dummy_phy_operations = {
1786 .init = efx_port_dummy_op_int,
1787 .reconfigure = efx_port_dummy_op_void,
1788 .check_hw = efx_port_dummy_op_int,
1789 .fini = efx_port_dummy_op_void,
1790 .clear_interrupt = efx_port_dummy_op_void,
1791 .reset_xaui = efx_port_dummy_op_void,
1792};
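/* Because every pointer above is filled in with a dummy, callers can
 * invoke PHY operations unconditionally.  Illustrative sketch only,
 * not part of the driver; the function name is hypothetical.
 */
#if 0
static void example_quiesce_phy(struct efx_nic *efx)
{
	/* No NULL check needed, even before a real PHY driver is bound */
	efx->phy_op->clear_interrupt(efx);
}
#endif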
1793
1794/* Dummy board operations */
1795static int efx_nic_dummy_op_int(struct efx_nic *nic)
1796{
1797 return 0;
1798}
1799
1800static struct efx_board efx_dummy_board_info = {
1801 .init = efx_nic_dummy_op_int,
1802 .init_leds = efx_port_dummy_op_int,
1803 .set_fault_led = efx_port_dummy_op_blink,
1804 .fini = efx_port_dummy_op_void,
1805};
1806
1807/**************************************************************************
1808 *
1809 * Data housekeeping
1810 *
1811 **************************************************************************/
1812
1813/* This zeroes out and then fills in the invariants in a struct
1814 * efx_nic (including all sub-structures).
1815 */
1816static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
1817 struct pci_dev *pci_dev, struct net_device *net_dev)
1818{
1819 struct efx_channel *channel;
1820 struct efx_tx_queue *tx_queue;
1821 struct efx_rx_queue *rx_queue;
1822 int i, rc;
1823
1824 /* Initialise common structures */
1825 memset(efx, 0, sizeof(*efx));
1826 spin_lock_init(&efx->biu_lock);
1827 spin_lock_init(&efx->phy_lock);
1828 INIT_WORK(&efx->reset_work, efx_reset_work);
1829 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
1830 efx->pci_dev = pci_dev;
1831 efx->state = STATE_INIT;
1832 efx->reset_pending = RESET_TYPE_NONE;
1833 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
1834 efx->board_info = efx_dummy_board_info;
1835
1836 efx->net_dev = net_dev;
1837 efx->rx_checksum_enabled = true;
1838 spin_lock_init(&efx->netif_stop_lock);
1839 spin_lock_init(&efx->stats_lock);
1840 mutex_init(&efx->mac_lock);
1841 efx->phy_op = &efx_dummy_phy_operations;
1842 efx->mii.dev = net_dev;
1843 INIT_WORK(&efx->reconfigure_work, efx_reconfigure_work);
1844 atomic_set(&efx->netif_stop_count, 1);
1845
1846 for (i = 0; i < EFX_MAX_CHANNELS; i++) {
1847 channel = &efx->channel[i];
1848 channel->efx = efx;
1849 channel->channel = i;
1850 channel->work_pending = false;
1851 }
1852 for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
1853 tx_queue = &efx->tx_queue[i];
1854 tx_queue->efx = efx;
1855 tx_queue->queue = i;
1856 tx_queue->buffer = NULL;
1857 tx_queue->channel = &efx->channel[0]; /* for safety */
1858 tx_queue->tso_headers_free = NULL;
1859 }
1860 for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
1861 rx_queue = &efx->rx_queue[i];
1862 rx_queue->efx = efx;
1863 rx_queue->queue = i;
1864 rx_queue->channel = &efx->channel[0]; /* for safety */
1865 rx_queue->buffer = NULL;
1866 spin_lock_init(&rx_queue->add_lock);
1867 INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
1868 }
1869
1870 efx->type = type;
1871
1872 /* Sanity-check NIC type: ring masks must be 2^n - 1, so mask & (mask + 1) == 0 */
1873 EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
1874 (efx->type->txd_ring_mask + 1));
1875 EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
1876 (efx->type->rxd_ring_mask + 1));
1877 EFX_BUG_ON_PARANOID(efx->type->evq_size &
1878 (efx->type->evq_size - 1));
1879 /* As close as we can get to guaranteeing that we don't overflow */
1880 EFX_BUG_ON_PARANOID(efx->type->evq_size <
1881 (efx->type->txd_ring_mask + 1 +
1882 efx->type->rxd_ring_mask + 1));
1883 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
1884
1885 /* Higher numbered interrupt modes are less capable, so max() clamps the request */
1886 efx->interrupt_mode = max(efx->type->max_interrupt_mode,
1887 interrupt_mode);
1888
1889 efx->workqueue = create_singlethread_workqueue("sfc_work");
1890 if (!efx->workqueue) {
1891 rc = -ENOMEM;
1892 goto fail1;
1893 }
1894
1895 efx->reset_workqueue = create_singlethread_workqueue("sfc_reset");
1896 if (!efx->reset_workqueue) {
1897 rc = -ENOMEM;
1898 goto fail2;
1899 }
1900
1901 return 0;
1902
1903 fail2:
1904 destroy_workqueue(efx->workqueue);
1905 efx->workqueue = NULL;
1906
1907 fail1:
1908 return rc;
1909}
1910
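/* Tear down the state created by efx_init_struct().  The NULL checks
 * tolerate workqueues that were never created or have already been
 * destroyed. */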
1911static void efx_fini_struct(struct efx_nic *efx)
1912{
1913 if (efx->reset_workqueue) {
1914 destroy_workqueue(efx->reset_workqueue);
1915 efx->reset_workqueue = NULL;
1916 }
1917 if (efx->workqueue) {
1918 destroy_workqueue(efx->workqueue);
1919 efx->workqueue = NULL;
1920 }
1921}
1922
1923/**************************************************************************
1924 *
1925 * PCI interface
1926 *
1927 **************************************************************************/
1928
1929/* Main body of final NIC shutdown code
1930 * This is called only at module unload (or hotplug removal).
1931 */
1932static void efx_pci_remove_main(struct efx_nic *efx)
1933{
1934 EFX_ASSERT_RESET_SERIALISED(efx);
1935
1936 /* Skip everything if we never obtained a valid membase */
1937 if (!efx->membase)
1938 return;
1939
1940 efx_fini_channels(efx);
1941 efx_fini_port(efx);
1942
1943 /* Shutdown the board, then the NIC and board state */
1944 efx->board_info.fini(efx);
1945 falcon_fini_interrupt(efx);
1946
1947 efx_fini_napi(efx);
1948 efx_remove_all(efx);
1949}
1950
1951/* Final NIC shutdown
1952 * This is called only at module unload (or hotplug removal).
1953 */
1954static void efx_pci_remove(struct pci_dev *pci_dev)
1955{
1956 struct efx_nic *efx;
1957
1958 efx = pci_get_drvdata(pci_dev);
1959 if (!efx)
1960 return;
1961
1962 /* Mark the NIC as fini, then stop the interface */
1963 rtnl_lock();
1964 efx->state = STATE_FINI;
1965 dev_close(efx->net_dev);
1966
1967 /* Allow any queued efx_resets() to complete */
1968 rtnl_unlock();
1969
1970 if (efx->membase == NULL)
1971 goto out;
1972
1973 efx_unregister_netdev(efx);
1974
1975 /* Wait for any scheduled resets to complete. No more will be
1976 * scheduled from this point because efx_stop_all() has been
1977 * called, we are no longer registered with driverlink, and
1978 * the net_device has been removed. */
1979 flush_workqueue(efx->reset_workqueue);
1980
1981 efx_pci_remove_main(efx);
1982
1983out:
1984 efx_fini_io(efx);
1985 EFX_LOG(efx, "shutdown successful\n");
1986
1987 pci_set_drvdata(pci_dev, NULL);
1988 efx_fini_struct(efx);
1989 free_netdev(efx->net_dev);
1990}
1991
1992/* Main body of NIC initialisation
1993 * This is called at module load (or hotplug insertion, theoretically).
1994 */
1995static int efx_pci_probe_main(struct efx_nic *efx)
1996{
1997 int rc;
1998
1999 /* Do start-of-day initialisation */
2000 rc = efx_probe_all(efx);
2001 if (rc)
2002 goto fail1;
2003
2004 rc = efx_init_napi(efx);
2005 if (rc)
2006 goto fail2;
2007
2008 /* Initialise the board */
2009 rc = efx->board_info.init(efx);
2010 if (rc) {
2011 EFX_ERR(efx, "failed to initialise board\n");
2012 goto fail3;
2013 }
2014
2015 rc = falcon_init_nic(efx);
2016 if (rc) {
2017 EFX_ERR(efx, "failed to initialise NIC\n");
2018 goto fail4;
2019 }
2020
2021 rc = efx_init_port(efx);
2022 if (rc) {
2023 EFX_ERR(efx, "failed to initialise port\n");
2024 goto fail5;
2025 }
2026
2027 rc = efx_init_channels(efx);
2028 if (rc)
2029 goto fail6;
2030
2031 rc = falcon_init_interrupt(efx);
2032 if (rc)
2033 goto fail7;
2034
2035 return 0;
2036
2037 fail7:
2038 efx_fini_channels(efx);
2039 fail6:
2040 efx_fini_port(efx);
2041 fail5:
2042 fail4:
2043 fail3:
2044 efx_fini_napi(efx);
2045 fail2:
2046 efx_remove_all(efx);
2047 fail1:
2048 return rc;
2049}
2050
2051/* NIC initialisation
2052 *
2053 * This is called at module load (or hotplug insertion,
2054 * theoretically). It sets up PCI mappings, tests and resets the NIC,
2055 * sets up and registers the network devices with the kernel and hooks
2056 * the interrupt service routine. It does not prepare the device for
2057 * transmission; this is left to the first time one of the network
2058 * interfaces is brought up (i.e. efx_net_open).
2059 */
2060static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2061 const struct pci_device_id *entry)
2062{
2063 struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
2064 struct net_device *net_dev;
2065 struct efx_nic *efx;
2066 int i, rc;
2067
2068 /* Allocate and initialise a struct net_device and struct efx_nic */
2069 net_dev = alloc_etherdev(sizeof(*efx));
2070 if (!net_dev)
2071 return -ENOMEM;
2072 net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
2073 NETIF_F_HIGHDMA | NETIF_F_TSO);
2074 if (lro)
2075 net_dev->features |= NETIF_F_LRO;
2076 /* Mask for features that also apply to VLAN devices */
2077 net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
2078 NETIF_F_HIGHDMA);
2079 efx = netdev_priv(net_dev);
2080 pci_set_drvdata(pci_dev, efx);
2081 rc = efx_init_struct(efx, type, pci_dev, net_dev);
2082 if (rc)
2083 goto fail1;
2084
2085 EFX_INFO(efx, "Solarflare Communications NIC detected\n");
2086
2087 /* Set up basic I/O (BAR mappings etc) */
2088 rc = efx_init_io(efx);
2089 if (rc)
2090 goto fail2;
2091
2092 /* No serialisation is required with the reset path because
2093 * we're in STATE_INIT. */
2094 for (i = 0; i < 5; i++) {
2095 rc = efx_pci_probe_main(efx);
2096 if (rc == 0)
2097 break;
2098
2099 /* Serialise against efx_reset(). No more resets will be
2100 * scheduled since efx_stop_all() has been called, and we
2101 * have never been registered with either
2102 * the rtnetlink or driverlink layers. */
2103 flush_workqueue(efx->reset_workqueue);
2104
2105 /* Retry if a recoverable reset event has been scheduled */
2106 if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
2107 (efx->reset_pending != RESET_TYPE_ALL))
2108 goto fail3;
2109
2110 efx->reset_pending = RESET_TYPE_NONE;
2111 }
2112
2113 if (rc) {
2114 EFX_ERR(efx, "Could not reset NIC\n");
2115 goto fail4;
2116 }
2117
2118 /* Switch to the running state before we expose the device to
2119 * the OS. This is to ensure that the initial gathering of
2120 * MAC stats succeeds. */
2121 rtnl_lock();
2122 efx->state = STATE_RUNNING;
2123 rtnl_unlock();
2124
2125 rc = efx_register_netdev(efx);
2126 if (rc)
2127 goto fail5;
2128
2129 EFX_LOG(efx, "initialisation successful\n");
2130
2131 return 0;
2132
2133 fail5:
2134 efx_pci_remove_main(efx);
2135 fail4:
2136 fail3:
2137 efx_fini_io(efx);
2138 fail2:
2139 efx_fini_struct(efx);
2140 fail1:
2141 EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
2142 free_netdev(net_dev);
2143 return rc;
2144}
2145
2146static struct pci_driver efx_pci_driver = {
2147 .name = EFX_DRIVER_NAME,
2148 .id_table = efx_pci_table,
2149 .probe = efx_pci_probe,
2150 .remove = efx_pci_remove,
2151};
2152
2153/**************************************************************************
2154 *
2155 * Kernel module interface
2156 *
2157 *************************************************************************/
2158
2159module_param(interrupt_mode, uint, 0444);
2160MODULE_PARM_DESC(interrupt_mode,
2161 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
2162
2163static int __init efx_init_module(void)
2164{
2165 int rc;
2166
2167 printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
2168
2169 rc = register_netdevice_notifier(&efx_netdev_notifier);
2170 if (rc)
2171 goto err_notifier;
2172
2173 refill_workqueue = create_workqueue("sfc_refill");
2174 if (!refill_workqueue) {
2175 rc = -ENOMEM;
2176 goto err_refill;
2177 }
2178
2179 rc = pci_register_driver(&efx_pci_driver);
2180 if (rc < 0)
2181 goto err_pci;
2182
2183 return 0;
2184
2185 err_pci:
2186 destroy_workqueue(refill_workqueue);
2187 err_refill:
2188 unregister_netdevice_notifier(&efx_netdev_notifier);
2189 err_notifier:
2190 return rc;
2191}
2192
2193static void __exit efx_exit_module(void)
2194{
2195 printk(KERN_INFO "Solarflare NET driver unloading\n");
2196
2197 pci_unregister_driver(&efx_pci_driver);
2198 destroy_workqueue(refill_workqueue);
2199 unregister_netdevice_notifier(&efx_netdev_notifier);
2200
2201}
2202
2203module_init(efx_init_module);
2204module_exit(efx_exit_module);
2205
2206MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and "
2207 "Solarflare Communications");
2208MODULE_DESCRIPTION("Solarflare Communications network driver");
2209MODULE_LICENSE("GPL");
2210MODULE_DEVICE_TABLE(pci, efx_pci_table);