/* Intel Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#include <linux/module.h>

#include "fm10k.h"

static const struct fm10k_info *fm10k_info_tbl[] = {
	[fm10k_device_pf] = &fm10k_pf_info,
};

/**
 * fm10k_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id fm10k_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, FM10K_DEV_ID_PF), fm10k_device_pf },
	/* required last entry */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, fm10k_pci_tbl);

u16 fm10k_read_pci_cfg_word(struct fm10k_hw *hw, u32 reg)
{
	struct fm10k_intfc *interface = hw->back;
	u16 value = 0;

	if (FM10K_REMOVED(hw->hw_addr))
		return ~value;

	pci_read_config_word(interface->pdev, reg, &value);
	if (value == 0xFFFF)
		fm10k_write_flush(hw);

	return value;
}

u32 fm10k_read_reg(struct fm10k_hw *hw, int reg)
{
	u32 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
	u32 value = 0;

	if (FM10K_REMOVED(hw_addr))
		return ~value;

	value = readl(&hw_addr[reg]);
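	/* a register that reads as all 1s usually means the device has been
	 * removed; confirm by re-reading register 0 before detaching
	 */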
	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
		struct fm10k_intfc *interface = hw->back;
		struct net_device *netdev = interface->netdev;

		hw->hw_addr = NULL;
		netif_device_detach(netdev);
		netdev_err(netdev, "PCIe link lost, device now detached\n");
	}

	return value;
}

static int fm10k_hw_ready(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;

	fm10k_write_flush(hw);

	return FM10K_REMOVED(hw->hw_addr) ? -ENODEV : 0;
}

void fm10k_service_event_schedule(struct fm10k_intfc *interface)
{
	if (!test_bit(__FM10K_SERVICE_DISABLE, &interface->state) &&
	    !test_and_set_bit(__FM10K_SERVICE_SCHED, &interface->state))
		schedule_work(&interface->service_task);
}

static void fm10k_service_event_complete(struct fm10k_intfc *interface)
{
	BUG_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__FM10K_SERVICE_SCHED, &interface->state);
}

/**
 * fm10k_service_timer - Timer Call-back
 * @data: pointer to interface cast into an unsigned long
 **/
static void fm10k_service_timer(unsigned long data)
{
	struct fm10k_intfc *interface = (struct fm10k_intfc *)data;

	/* Reset the timer */
	mod_timer(&interface->service_timer, (HZ * 2) + jiffies);

	fm10k_service_event_schedule(interface);
}

static void fm10k_detach_subtask(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;

	/* do nothing if device is still present or hw_addr is set */
	if (netif_device_present(netdev) || interface->hw.hw_addr)
		return;

	rtnl_lock();

	if (netif_running(netdev))
		dev_close(netdev);

	rtnl_unlock();
}

static void fm10k_reinit(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	WARN_ON(in_interrupt());

	/* put off any impending NetWatchDogTimeout */
	netdev->trans_start = jiffies;

	while (test_and_set_bit(__FM10K_RESETTING, &interface->state))
		usleep_range(1000, 2000);

	rtnl_lock();

	if (netif_running(netdev))
		fm10k_close(netdev);

	fm10k_mbx_free_irq(interface);

	/* delay any future reset requests */
	interface->last_reset = jiffies + (10 * HZ);

	/* reset and initialize the hardware so it is in a known state */
	err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw);
	if (err)
		dev_err(&interface->pdev->dev, "init_hw failed: %d\n", err);

	/* reassociate interrupts */
	fm10k_mbx_request_irq(interface);

	if (netif_running(netdev))
		fm10k_open(netdev);

	rtnl_unlock();

	clear_bit(__FM10K_RESETTING, &interface->state);
}

static void fm10k_reset_subtask(struct fm10k_intfc *interface)
{
	if (!(interface->flags & FM10K_FLAG_RESET_REQUESTED))
		return;

	interface->flags &= ~FM10K_FLAG_RESET_REQUESTED;

	netdev_err(interface->netdev, "Reset interface\n");
	interface->tx_timeout_count++;

	fm10k_reinit(interface);
}

/**
 * fm10k_configure_swpri_map - Configure Receive SWPRI to PC mapping
 * @interface: board private structure
 *
 * Configure the SWPRI to PC mapping for the port.
 **/
static void fm10k_configure_swpri_map(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int i;

	/* clear flag indicating update is needed */
	interface->flags &= ~FM10K_FLAG_SWPRI_CONFIG;

	/* these registers are only available on the PF */
	if (hw->mac.type != fm10k_mac_pf)
		return;

	/* configure SWPRI to PC map */
	for (i = 0; i < FM10K_SWPRI_MAX; i++)
		fm10k_write_reg(hw, FM10K_SWPRI_MAP(i),
				netdev_get_prio_tc_map(netdev, i));
}

/**
 * fm10k_watchdog_update_host_state - Update the link status based on host.
 * @interface: board private structure
 **/
static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	s32 err;

	if (test_bit(__FM10K_LINK_DOWN, &interface->state)) {
		interface->host_ready = false;
		if (time_is_after_jiffies(interface->link_down_event))
			return;
		clear_bit(__FM10K_LINK_DOWN, &interface->state);
	}

	if (interface->flags & FM10K_FLAG_SWPRI_CONFIG) {
		if (rtnl_trylock()) {
			fm10k_configure_swpri_map(interface);
			rtnl_unlock();
		}
	}

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

	err = hw->mac.ops.get_host_state(hw, &interface->host_ready);
	if (err && time_is_before_jiffies(interface->last_reset))
		interface->flags |= FM10K_FLAG_RESET_REQUESTED;

	/* free the lock */
	fm10k_mbx_unlock(interface);
}

/**
 * fm10k_mbx_subtask - Process upstream and downstream mailboxes
 * @interface: board private structure
 *
 * This function will process both the upstream and downstream mailboxes.
 * It is necessary for us to hold the rtnl_lock while doing this as the
 * mailbox accesses are protected by this lock.
 **/
static void fm10k_mbx_subtask(struct fm10k_intfc *interface)
{
	/* process upstream mailbox and update device state */
	fm10k_watchdog_update_host_state(interface);
}

/**
 * fm10k_watchdog_host_is_ready - Update netdev status based on host ready
 * @interface: board private structure
 **/
static void fm10k_watchdog_host_is_ready(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;

	/* only continue if link state is currently down */
	if (netif_carrier_ok(netdev))
		return;

	netif_info(interface, drv, netdev, "NIC Link is up\n");

	netif_carrier_on(netdev);
	netif_tx_wake_all_queues(netdev);
}

/**
 * fm10k_watchdog_host_not_ready - Update netdev status based on host not ready
 * @interface: board private structure
 **/
static void fm10k_watchdog_host_not_ready(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;

	/* only continue if link state is currently up */
	if (!netif_carrier_ok(netdev))
		return;

	netif_info(interface, drv, netdev, "NIC Link is down\n");

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);
}

/**
 * fm10k_update_stats - Update the board statistics counters.
 * @interface: board private structure
 **/
void fm10k_update_stats(struct fm10k_intfc *interface)
{
	struct net_device_stats *net_stats = &interface->netdev->stats;
	struct fm10k_hw *hw = &interface->hw;
	u64 rx_errors = 0, rx_csum_errors = 0, tx_csum_errors = 0;
	u64 restart_queue = 0, tx_busy = 0, alloc_failed = 0;
	u64 rx_bytes_nic = 0, rx_pkts_nic = 0, rx_drops_nic = 0;
	u64 tx_bytes_nic = 0, tx_pkts_nic = 0;
	u64 bytes, pkts;
	int i;

	/* do not allow stats update via service task for next second */
	interface->next_stats_update = jiffies + HZ;

	/* gather some stats to the interface struct that are per queue */
	for (bytes = 0, pkts = 0, i = 0; i < interface->num_tx_queues; i++) {
		struct fm10k_ring *tx_ring = interface->tx_ring[i];

		restart_queue += tx_ring->tx_stats.restart_queue;
		tx_busy += tx_ring->tx_stats.tx_busy;
		tx_csum_errors += tx_ring->tx_stats.csum_err;
		bytes += tx_ring->stats.bytes;
		pkts += tx_ring->stats.packets;
	}

	interface->restart_queue = restart_queue;
	interface->tx_busy = tx_busy;
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = pkts;
	interface->tx_csum_errors = tx_csum_errors;
	/* gather some stats to the interface struct that are per queue */
	for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) {
		struct fm10k_ring *rx_ring = interface->rx_ring[i];

		bytes += rx_ring->stats.bytes;
		pkts += rx_ring->stats.packets;
		alloc_failed += rx_ring->rx_stats.alloc_failed;
		rx_csum_errors += rx_ring->rx_stats.csum_err;
		rx_errors += rx_ring->rx_stats.errors;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = pkts;
	interface->alloc_failed = alloc_failed;
	interface->rx_csum_errors = rx_csum_errors;
	interface->rx_errors = rx_errors;

	hw->mac.ops.update_hw_stats(hw, &interface->stats);

	for (i = 0; i < FM10K_MAX_QUEUES_PF; i++) {
		struct fm10k_hw_stats_q *q = &interface->stats.q[i];

		tx_bytes_nic += q->tx_bytes.count;
		tx_pkts_nic += q->tx_packets.count;
		rx_bytes_nic += q->rx_bytes.count;
		rx_pkts_nic += q->rx_packets.count;
		rx_drops_nic += q->rx_drops.count;
	}

	interface->tx_bytes_nic = tx_bytes_nic;
	interface->tx_packets_nic = tx_pkts_nic;
	interface->rx_bytes_nic = rx_bytes_nic;
	interface->rx_packets_nic = rx_pkts_nic;
	interface->rx_drops_nic = rx_drops_nic;

	/* Fill out the OS statistics structure */
	net_stats->rx_errors = interface->stats.xec.count;
	net_stats->rx_dropped = interface->stats.nodesc_drop.count;
}

/**
 * fm10k_watchdog_flush_tx - flush queues on host not ready
 * @interface - pointer to the device interface structure
 **/
static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface)
{
	int some_tx_pending = 0;
	int i;

	/* nothing to do if carrier is up */
	if (netif_carrier_ok(interface->netdev))
		return;

	for (i = 0; i < interface->num_tx_queues; i++) {
		struct fm10k_ring *tx_ring = interface->tx_ring[i];

		if (tx_ring->next_to_use != tx_ring->next_to_clean) {
			some_tx_pending = 1;
			break;
		}
	}

	/* We've lost link, so the controller stops DMA, but we've got
	 * queued Tx work that's never going to get done, so reset
	 * controller to flush Tx.
	 */
	if (some_tx_pending)
		interface->flags |= FM10K_FLAG_RESET_REQUESTED;
}

/**
 * fm10k_watchdog_subtask - check and bring link up
 * @interface - pointer to the device interface structure
 **/
static void fm10k_watchdog_subtask(struct fm10k_intfc *interface)
{
	/* if interface is down do nothing */
	if (test_bit(__FM10K_DOWN, &interface->state) ||
	    test_bit(__FM10K_RESETTING, &interface->state))
		return;

	if (interface->host_ready)
		fm10k_watchdog_host_is_ready(interface);
	else
		fm10k_watchdog_host_not_ready(interface);

	/* update stats only once every second */
	if (time_is_before_jiffies(interface->next_stats_update))
		fm10k_update_stats(interface);

	/* flush any uncompleted work */
	fm10k_watchdog_flush_tx(interface);
}

/**
 * fm10k_check_hang_subtask - check for hung queues and dropped interrupts
 * @interface - pointer to the device interface structure
 *
 * This function serves two purposes. First it strobes the interrupt lines
 * in order to make certain interrupts are occurring. Secondly it sets the
 * bits needed to check for TX hangs. As a result we should immediately
 * determine if a hang has occurred.
 */
static void fm10k_check_hang_subtask(struct fm10k_intfc *interface)
{
	int i;

	/* If we're down or resetting, just bail */
	if (test_bit(__FM10K_DOWN, &interface->state) ||
	    test_bit(__FM10K_RESETTING, &interface->state))
		return;

	/* rate limit tx hang checks to only once every 2 seconds */
	if (time_is_after_eq_jiffies(interface->next_tx_hang_check))
		return;
	interface->next_tx_hang_check = jiffies + (2 * HZ);

	if (netif_carrier_ok(interface->netdev)) {
		/* Force detection of hung controller */
		for (i = 0; i < interface->num_tx_queues; i++)
			set_check_for_tx_hang(interface->tx_ring[i]);

		/* Rearm all in-use q_vectors for immediate firing */
		for (i = 0; i < interface->num_q_vectors; i++) {
			struct fm10k_q_vector *qv = interface->q_vector[i];

			if (!qv->tx.count && !qv->rx.count)
				continue;
			writel(FM10K_ITR_ENABLE | FM10K_ITR_PENDING2, qv->itr);
		}
	}
}

/**
 * fm10k_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
static void fm10k_service_task(struct work_struct *work)
{
	struct fm10k_intfc *interface;

	interface = container_of(work, struct fm10k_intfc, service_task);

	/* tasks always capable of running, but must be rtnl protected */
	fm10k_mbx_subtask(interface);
	fm10k_detach_subtask(interface);
	fm10k_reset_subtask(interface);

	/* tasks only run when interface is up */
	fm10k_watchdog_subtask(interface);
	fm10k_check_hang_subtask(interface);

	/* release lock on service events to allow scheduling next event */
	fm10k_service_event_complete(interface);
}

static void fm10k_napi_enable_all(struct fm10k_intfc *interface)
{
	struct fm10k_q_vector *q_vector;
	int q_idx;

	for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) {
		q_vector = interface->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

static irqreturn_t fm10k_msix_clean_rings(int irq, void *data)
{
	struct fm10k_q_vector *q_vector = data;

	if (q_vector->rx.count || q_vector->tx.count)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

#define FM10K_ERR_MSG(type) case (type): error = #type; break
static void fm10k_print_fault(struct fm10k_intfc *interface, int type,
			      struct fm10k_fault *fault)
{
	struct pci_dev *pdev = interface->pdev;
	char *error;

	switch (type) {
	case FM10K_PCA_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown PCA error";
			break;
		FM10K_ERR_MSG(PCA_NO_FAULT);
		FM10K_ERR_MSG(PCA_UNMAPPED_ADDR);
		FM10K_ERR_MSG(PCA_BAD_QACCESS_PF);
		FM10K_ERR_MSG(PCA_BAD_QACCESS_VF);
		FM10K_ERR_MSG(PCA_MALICIOUS_REQ);
		FM10K_ERR_MSG(PCA_POISONED_TLP);
		FM10K_ERR_MSG(PCA_TLP_ABORT);
		}
		break;
	case FM10K_THI_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown THI error";
			break;
		FM10K_ERR_MSG(THI_NO_FAULT);
		FM10K_ERR_MSG(THI_MAL_DIS_Q_FAULT);
		}
		break;
	case FM10K_FUM_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown FUM error";
			break;
		FM10K_ERR_MSG(FUM_NO_FAULT);
		FM10K_ERR_MSG(FUM_UNMAPPED_ADDR);
		FM10K_ERR_MSG(FUM_BAD_VF_QACCESS);
		FM10K_ERR_MSG(FUM_ADD_DECODE_ERR);
		FM10K_ERR_MSG(FUM_RO_ERROR);
		FM10K_ERR_MSG(FUM_QPRC_CRC_ERROR);
		FM10K_ERR_MSG(FUM_CSR_TIMEOUT);
		FM10K_ERR_MSG(FUM_INVALID_TYPE);
		FM10K_ERR_MSG(FUM_INVALID_LENGTH);
		FM10K_ERR_MSG(FUM_INVALID_BE);
		FM10K_ERR_MSG(FUM_INVALID_ALIGN);
		}
		break;
	default:
		error = "Undocumented fault";
		break;
	}

	dev_warn(&pdev->dev,
		 "%s Address: 0x%llx SpecInfo: 0x%x Func: %02x.%0x\n",
		 error, fault->address, fault->specinfo,
		 PCI_SLOT(fault->func), PCI_FUNC(fault->func));
}

static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_fault fault = { 0 };
	int type, err;

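	/* walk each fault cause bit in EICR starting from the PCA fault;
	 * the matching fault register block advances by FM10K_FAULT_SIZE
	 * for every bit shifted out
	 */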
	for (eicr &= FM10K_EICR_FAULT_MASK, type = FM10K_PCA_FAULT;
	     eicr;
	     eicr >>= 1, type += FM10K_FAULT_SIZE) {
		/* only check if there is an error reported */
		if (!(eicr & 0x1))
			continue;

		/* retrieve fault info */
		err = hw->mac.ops.get_fault(hw, type, &fault);
		if (err) {
			dev_err(&interface->pdev->dev,
				"error reading fault\n");
			continue;
		}

		fm10k_print_fault(interface, type, &fault);
	}
}

static void fm10k_reset_drop_on_empty(struct fm10k_intfc *interface, u32 eicr)
{
	struct fm10k_hw *hw = &interface->hw;
	const u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	u32 maxholdq;
	int q;

	if (!(eicr & FM10K_EICR_MAXHOLDTIME))
		return;

	maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(7));
	if (maxholdq)
		fm10k_write_reg(hw, FM10K_MAXHOLDQ(7), maxholdq);
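	/* scan all 256 queue bits from the highest queue down, 32 per
	 * MAXHOLDQ register and MSB first; a set bit indicates a queue hit
	 * the receive overrun condition, so PF-owned queues get their
	 * RXDCTL rewritten while VF queues are only counted
	 */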
	for (q = 255;;) {
		if (maxholdq & (1 << 31)) {
			if (q < FM10K_MAX_QUEUES_PF) {
				interface->rx_overrun_pf++;
				fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl);
			} else {
				interface->rx_overrun_vf++;
			}
		}

		maxholdq *= 2;
		if (!maxholdq)
			q &= ~(32 - 1);

		if (!q)
			break;

		if (q-- % 32)
			continue;

		maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(q / 32));
		if (maxholdq)
			fm10k_write_reg(hw, FM10K_MAXHOLDQ(q / 32), maxholdq);
	}
}

static irqreturn_t fm10k_msix_mbx_pf(int irq, void *data)
{
	struct fm10k_intfc *interface = data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 eicr;

	/* unmask any set bits related to this interrupt */
	eicr = fm10k_read_reg(hw, FM10K_EICR);
	fm10k_write_reg(hw, FM10K_EICR, eicr & (FM10K_EICR_MAILBOX |
						FM10K_EICR_SWITCHREADY |
						FM10K_EICR_SWITCHNOTREADY));

	/* report any faults found to the message log */
	fm10k_report_fault(interface, eicr);

	/* reset any queues disabled due to receiver overrun */
	fm10k_reset_drop_on_empty(interface, eicr);

	/* service mailboxes */
	if (fm10k_mbx_trylock(interface)) {
		mbx->ops.process(hw, mbx);
		fm10k_mbx_unlock(interface);
	}

	/* if switch toggled state we should reset GLORTs */
	if (eicr & FM10K_EICR_SWITCHNOTREADY) {
		/* force link down for at least 4 seconds */
		interface->link_down_event = jiffies + (4 * HZ);
		set_bit(__FM10K_LINK_DOWN, &interface->state);

		/* reset dglort_map back to no config */
		hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
	}

	/* we should validate host state after interrupt event */
	hw->mac.get_host_state = 1;
	fm10k_service_event_schedule(interface);

	/* re-enable mailbox interrupt and indicate 20us delay */
	fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR),
			FM10K_ITR_ENABLE | FM10K_MBX_INT_DELAY);

	return IRQ_HANDLED;
}

void fm10k_mbx_free_irq(struct fm10k_intfc *interface)
{
	struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
	struct fm10k_hw *hw = &interface->hw;
	int itr_reg;

	/* disconnect the mailbox */
	hw->mbx.ops.disconnect(hw, &hw->mbx);

	/* disable Mailbox cause */
	if (hw->mac.type == fm10k_mac_pf) {
		fm10k_write_reg(hw, FM10K_EIMR,
				FM10K_EIMR_DISABLE(PCA_FAULT) |
				FM10K_EIMR_DISABLE(FUM_FAULT) |
				FM10K_EIMR_DISABLE(MAILBOX) |
				FM10K_EIMR_DISABLE(SWITCHREADY) |
				FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
				FM10K_EIMR_DISABLE(SRAMERROR) |
				FM10K_EIMR_DISABLE(VFLR) |
				FM10K_EIMR_DISABLE(MAXHOLDTIME));
		itr_reg = FM10K_ITR(FM10K_MBX_VECTOR);
	}

	fm10k_write_reg(hw, itr_reg, FM10K_ITR_MASK_SET);

	free_irq(entry->vector, interface);
}

/* generic error handler for mailbox issues */
static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results,
			   struct fm10k_mbx_info *mbx)
{
	struct fm10k_intfc *interface;
	struct pci_dev *pdev;

	interface = container_of(hw, struct fm10k_intfc, hw);
	pdev = interface->pdev;

	dev_err(&pdev->dev, "Unknown message ID %u\n",
		**results & FM10K_TLV_ID_MASK);

	return 0;
}

static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results,
			   struct fm10k_mbx_info *mbx)
{
	struct fm10k_intfc *interface;
	u32 dglort_map = hw->mac.dglort_map;
	s32 err;

	err = fm10k_msg_lport_map_pf(hw, results, mbx);
	if (err)
		return err;

	interface = container_of(hw, struct fm10k_intfc, hw);

	/* we need to reset if port count was just updated */
	if (dglort_map != hw->mac.dglort_map)
		interface->flags |= FM10K_FLAG_RESET_REQUESTED;

	return 0;
}

static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results,
			     struct fm10k_mbx_info *mbx)
{
	struct fm10k_intfc *interface;
	u16 glort, pvid;
	u32 pvid_update;
	s32 err;

	err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID],
				     &pvid_update);
	if (err)
		return err;

	/* extract values from the pvid update */
	glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT);
	pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID);

	/* if glort is not valid return error */
	if (!fm10k_glort_valid_pf(hw, glort))
		return FM10K_ERR_PARAM;

	/* verify VID is valid */
	if (pvid >= FM10K_VLAN_TABLE_VID_MAX)
		return FM10K_ERR_PARAM;

	interface = container_of(hw, struct fm10k_intfc, hw);

	/* we need to reset if default VLAN was just updated */
	if (pvid != hw->mac.default_vid)
		interface->flags |= FM10K_FLAG_RESET_REQUESTED;

	hw->mac.default_vid = pvid;

	return 0;
}

static const struct fm10k_msg_data pf_mbx_data[] = {
	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_lport_map),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_update_pvid),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
};

static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface)
{
	struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
	struct net_device *dev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* Use timer0 for interrupt moderation on the mailbox */
	u32 mbx_itr = FM10K_INT_MAP_TIMER0 | entry->entry;
	u32 other_itr = FM10K_INT_MAP_IMMEDIATE | entry->entry;

	/* register mailbox handlers */
	err = hw->mbx.ops.register_handlers(&hw->mbx, pf_mbx_data);
	if (err)
		return err;

	/* request the IRQ */
	err = request_irq(entry->vector, fm10k_msix_mbx_pf, 0,
			  dev->name, interface);
	if (err) {
		netif_err(interface, probe, dev,
			  "request_irq for msix_mbx failed: %d\n", err);
		return err;
	}

	/* Enable interrupts w/ no moderation for "other" interrupts */
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_SRAM), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_MaxHoldTime), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_VFLR), other_itr);

	/* Enable interrupts w/ moderation for mailbox */
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_Mailbox), mbx_itr);

	/* Enable individual interrupt causes */
	fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
					FM10K_EIMR_ENABLE(FUM_FAULT) |
					FM10K_EIMR_ENABLE(MAILBOX) |
					FM10K_EIMR_ENABLE(SWITCHREADY) |
					FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
					FM10K_EIMR_ENABLE(SRAMERROR) |
					FM10K_EIMR_ENABLE(VFLR) |
					FM10K_EIMR_ENABLE(MAXHOLDTIME));

	/* enable interrupt */
	fm10k_write_reg(hw, FM10K_ITR(entry->entry), FM10K_ITR_ENABLE);

	return 0;
}

int fm10k_mbx_request_irq(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* enable Mailbox cause */
	err = fm10k_mbx_request_irq_pf(interface);

	/* connect mailbox */
	if (!err)
		err = hw->mbx.ops.connect(hw, &hw->mbx);

	return err;
}

/**
 * fm10k_qv_free_irq - release interrupts associated with queue vectors
 * @interface: board private structure
 *
 * Release all interrupts associated with this interface
 **/
void fm10k_qv_free_irq(struct fm10k_intfc *interface)
{
	int vector = interface->num_q_vectors;
	struct fm10k_hw *hw = &interface->hw;
	struct msix_entry *entry;

	entry = &interface->msix_entries[NON_Q_VECTORS(hw) + vector];

	while (vector) {
		struct fm10k_q_vector *q_vector;

		vector--;
		entry--;
		q_vector = interface->q_vector[vector];

		if (!q_vector->tx.count && !q_vector->rx.count)
			continue;

		/* disable interrupts */

		writel(FM10K_ITR_MASK_SET, q_vector->itr);

		free_irq(entry->vector, q_vector);
	}
}

/**
 * fm10k_qv_request_irq - initialize interrupts for queue vectors
 * @interface: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
int fm10k_qv_request_irq(struct fm10k_intfc *interface)
{
	struct net_device *dev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	struct msix_entry *entry;
	int ri = 0, ti = 0;
	int vector, err;

	entry = &interface->msix_entries[NON_Q_VECTORS(hw)];

	for (vector = 0; vector < interface->num_q_vectors; vector++) {
		struct fm10k_q_vector *q_vector = interface->q_vector[vector];

		/* name the vector */
		if (q_vector->tx.count && q_vector->rx.count) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-TxRx-%d", dev->name, ri++);
			ti++;
		} else if (q_vector->rx.count) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-rx-%d", dev->name, ri++);
		} else if (q_vector->tx.count) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-tx-%d", dev->name, ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}

		/* Assign ITR register to q_vector */
		q_vector->itr = &interface->uc_addr[FM10K_ITR(entry->entry)];

		/* request the IRQ */
		err = request_irq(entry->vector, &fm10k_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			netif_err(interface, probe, dev,
				  "request_irq failed for MSIX interrupt Error: %d\n",
				  err);
			goto err_out;
		}

		/* Enable q_vector */
		writel(FM10K_ITR_ENABLE, q_vector->itr);

		entry++;
	}

	return 0;

err_out:
	/* wind through the ring freeing all entries and vectors */
	while (vector) {
		struct fm10k_q_vector *q_vector;

		entry--;
		vector--;
		q_vector = interface->q_vector[vector];

		if (!q_vector->tx.count && !q_vector->rx.count)
			continue;

		/* disable interrupts */

		writel(FM10K_ITR_MASK_SET, q_vector->itr);

		free_irq(entry->vector, q_vector);
	}

	return err;
}

void fm10k_up(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;

	/* Enable Tx/Rx DMA */
	hw->mac.ops.start_hw(hw);

	/* configure interrupts */
	hw->mac.ops.update_int_moderator(hw);

	/* clear down bit to indicate we are ready to go */
	clear_bit(__FM10K_DOWN, &interface->state);

	/* enable polling cleanups */
	fm10k_napi_enable_all(interface);

	/* re-establish Rx filters */
	fm10k_restore_rx_state(interface);

	/* enable transmits */
	netif_tx_start_all_queues(interface->netdev);

	/* kick off the service timer */
	mod_timer(&interface->service_timer, jiffies);
}

static void fm10k_napi_disable_all(struct fm10k_intfc *interface)
{
	struct fm10k_q_vector *q_vector;
	int q_idx;

	for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) {
		q_vector = interface->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}

void fm10k_down(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;

	/* signal that we are down to the interrupt handler and service task */
	set_bit(__FM10K_DOWN, &interface->state);

	/* call carrier off first to avoid false dev_watchdog timeouts */
	netif_carrier_off(netdev);

	/* disable transmits */
	netif_tx_stop_all_queues(netdev);
	netif_tx_disable(netdev);

	/* reset Rx filters */
	fm10k_reset_rx_state(interface);

	/* allow 10ms for device to quiesce */
	usleep_range(10000, 20000);

	/* disable polling routines */
	fm10k_napi_disable_all(interface);

	del_timer_sync(&interface->service_timer);

	/* capture stats one last time before stopping interface */
	fm10k_update_stats(interface);

	/* Disable DMA engine for Tx/Rx */
	hw->mac.ops.stop_hw(hw);
}

/**
 * fm10k_sw_init - Initialize general software structures
 * @interface: host interface private structure to initialize
 *
 * fm10k_sw_init initializes the interface private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int fm10k_sw_init(struct fm10k_intfc *interface,
			 const struct pci_device_id *ent)
{
	static const u32 seed[FM10K_RSSRK_SIZE] = { 0xda565a6d, 0xc20e5b25,
						    0x3d256741, 0xb08fa343,
						    0xcb2bcad0, 0xb4307bae,
						    0xa32dcb77, 0x0cf23080,
						    0x3bb7426a, 0xfa01acbe };
	const struct fm10k_info *fi = fm10k_info_tbl[ent->driver_data];
	struct fm10k_hw *hw = &interface->hw;
	struct pci_dev *pdev = interface->pdev;
	struct net_device *netdev = interface->netdev;
	unsigned int rss;
	int err;

	/* initialize back pointer */
	hw->back = interface;
	hw->hw_addr = interface->uc_addr;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Setup hw api */
	memcpy(&hw->mac.ops, fi->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = fi->mac;

	/* Set common capability flags and settings */
	rss = min_t(int, FM10K_MAX_RSS_INDICES, num_online_cpus());
	interface->ring_feature[RING_F_RSS].limit = rss;
	fi->get_invariants(hw);

	/* pick up the PCIe bus settings for reporting later */
	if (hw->mac.ops.get_bus_info)
		hw->mac.ops.get_bus_info(hw);

	/* limit the usable DMA range */
	if (hw->mac.ops.set_dma_mask)
		hw->mac.ops.set_dma_mask(hw, dma_get_mask(&pdev->dev));

	/* update netdev with DMA restrictions */
	if (dma_get_mask(&pdev->dev) > DMA_BIT_MASK(32)) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	/* delay any future reset requests */
	interface->last_reset = jiffies + (10 * HZ);

	/* reset and initialize the hardware so it is in a known state */
	err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw);
	if (err) {
		dev_err(&pdev->dev, "init_hw failed: %d\n", err);
		return err;
	}

	/* initialize hardware statistics */
	hw->mac.ops.update_hw_stats(hw, &interface->stats);

	/* Start with random Ethernet address */
	eth_random_addr(hw->mac.addr);

	/* Initialize MAC address from hardware */
	err = hw->mac.ops.read_mac_addr(hw);
	if (err) {
		dev_warn(&pdev->dev,
			 "Failed to obtain MAC address defaulting to random\n");
		/* tag address assignment as random */
		netdev->addr_assign_type |= NET_ADDR_RANDOM;
	}

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		return -EIO;
	}

	/* Only the PF can support VXLAN and NVGRE offloads */
	if (hw->mac.type != fm10k_mac_pf) {
		netdev->hw_enc_features = 0;
		netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
	}

	/* Initialize service timer and service task */
	set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
	setup_timer(&interface->service_timer, &fm10k_service_timer,
		    (unsigned long)interface);
	INIT_WORK(&interface->service_task, fm10k_service_task);

	/* set default ring sizes */
	interface->tx_ring_count = FM10K_DEFAULT_TXD;
	interface->rx_ring_count = FM10K_DEFAULT_RXD;

	/* set default interrupt moderation */
	interface->tx_itr = FM10K_ITR_10K;
	interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_ITR_20K;

	/* initialize vxlan_port list */
	INIT_LIST_HEAD(&interface->vxlan_port);

	/* initialize RSS key */
	memcpy(interface->rssrk, seed, sizeof(seed));

	/* Start off interface as being down */
	set_bit(__FM10K_DOWN, &interface->state);

	return 0;
}

static void fm10k_slot_warn(struct fm10k_intfc *interface)
{
	struct device *dev = &interface->pdev->dev;
	struct fm10k_hw *hw = &interface->hw;

	if (hw->mac.ops.is_slot_appropriate(hw))
		return;

	dev_warn(dev,
		 "For optimal performance, a %s %s slot is recommended.\n",
		 (hw->bus_caps.width == fm10k_bus_width_pcie_x1 ? "x1" :
		  hw->bus_caps.width == fm10k_bus_width_pcie_x4 ? "x4" :
		  "x8"),
		 (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s" :
		  hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s" :
		  "8.0GT/s"));
	dev_warn(dev,
		 "A slot with more lanes and/or higher speed is suggested.\n");
}

/**
 * fm10k_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in fm10k_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * fm10k_probe initializes an interface identified by a pci_dev structure.
 * The OS initialization, configuring of the interface private structure,
 * and a hardware reset occur.
 **/
static int fm10k_probe(struct pci_dev *pdev,
		       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct fm10k_intfc *interface;
	struct fm10k_hw *hw;
	int err;
	u64 dma_mask;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* By default fm10k only supports a 48 bit DMA mask */
	dma_mask = DMA_BIT_MASK(48) | dma_get_required_mask(&pdev->dev);

	if ((dma_mask <= DMA_BIT_MASK(32)) ||
	    dma_set_mask_and_coherent(&pdev->dev, dma_mask)) {
		dma_mask &= DMA_BIT_MASK(32);

		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev,
					"No usable DMA configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions(pdev,
					   pci_select_bars(pdev,
							   IORESOURCE_MEM),
					   fm10k_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	netdev = fm10k_alloc_netdev();
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_netdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	interface = netdev_priv(netdev);
	pci_set_drvdata(pdev, interface);

	interface->netdev = netdev;
	interface->pdev = pdev;
	hw = &interface->hw;

	interface->uc_addr = ioremap(pci_resource_start(pdev, 0),
				     FM10K_UC_ADDR_SIZE);
	if (!interface->uc_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	err = fm10k_sw_init(interface, ent);
	if (err)
		goto err_sw_init;

	err = fm10k_init_queueing_scheme(interface);
	if (err)
		goto err_sw_init;

	err = fm10k_mbx_request_irq(interface);
	if (err)
		goto err_mbx_interrupt;

	/* final check of hardware state before registering the interface */
	err = fm10k_hw_ready(interface);
	if (err)
		goto err_register;

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	/* stop all the transmit queues from transmitting until link is up */
	netif_tx_stop_all_queues(netdev);

	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "(PCI Express:%s Width: %s Payload: %s)\n",
		 (hw->bus.speed == fm10k_bus_speed_8000 ? "8.0GT/s" :
		  hw->bus.speed == fm10k_bus_speed_5000 ? "5.0GT/s" :
		  hw->bus.speed == fm10k_bus_speed_2500 ? "2.5GT/s" :
		  "Unknown"),
		 (hw->bus.width == fm10k_bus_width_pcie_x8 ? "x8" :
		  hw->bus.width == fm10k_bus_width_pcie_x4 ? "x4" :
		  hw->bus.width == fm10k_bus_width_pcie_x1 ? "x1" :
		  "Unknown"),
		 (hw->bus.payload == fm10k_bus_payload_128 ? "128B" :
		  hw->bus.payload == fm10k_bus_payload_256 ? "256B" :
		  hw->bus.payload == fm10k_bus_payload_512 ? "512B" :
		  "Unknown"));

	/* print warning for non-optimal configurations */
	fm10k_slot_warn(interface);

	/* clear the service task disable bit to allow service task to start */
	clear_bit(__FM10K_SERVICE_DISABLE, &interface->state);

	return 0;

err_register:
	fm10k_mbx_free_irq(interface);
err_mbx_interrupt:
	fm10k_clear_queueing_scheme(interface);
err_sw_init:
	iounmap(interface->uc_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_netdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * fm10k_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * fm10k_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void fm10k_remove(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct net_device *netdev = interface->netdev;

	set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
	cancel_work_sync(&interface->service_task);

	/* free netdev, this may bounce the interrupts due to setup_tc */
	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	/* disable mailbox interrupt */
	fm10k_mbx_free_irq(interface);

	/* free interrupts */
	fm10k_clear_queueing_scheme(interface);

	iounmap(interface->uc_addr);

	free_netdev(netdev);

	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	pci_disable_device(pdev);
}

static struct pci_driver fm10k_driver = {
	.name			= fm10k_driver_name,
	.id_table		= fm10k_pci_tbl,
	.probe			= fm10k_probe,
	.remove			= fm10k_remove,
};

/**
 * fm10k_register_pci_driver - register driver interface
 *
 * This function is called on module load in order to register the driver.
 **/
int fm10k_register_pci_driver(void)
{
	return pci_register_driver(&fm10k_driver);
}

/**
 * fm10k_unregister_pci_driver - unregister driver interface
 *
 * This function is called on module unload in order to remove the driver.
 **/
void fm10k_unregister_pci_driver(void)
{
	pci_unregister_driver(&fm10k_driver);
}