blob: 7529a8498da99c772dbbef92fc9239fc2e1a8482 [file] [log] [blame]
Alexander Duyckb3890e32014-09-20 19:46:05 -04001/* Intel Ethernet Switch Host Interface Driver
2 * Copyright(c) 2013 - 2014 Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * The full GNU General Public License is included in this distribution in
14 * the file called "COPYING".
15 *
16 * Contact Information:
17 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
18 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
19 */
20
21#include <linux/module.h>
22
23#include "fm10k.h"
24
/* map fm10k_pci_tbl driver_data values to their device info structures;
 * currently only the physical function (PF) is supported
 */
static const struct fm10k_info *fm10k_info_tbl[] = {
	[fm10k_device_pf] = &fm10k_pf_info,
};
28
/**
 * fm10k_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id fm10k_pci_tbl[] = {
	/* driver_data is an index into fm10k_info_tbl */
	{ PCI_VDEVICE(INTEL, FM10K_DEV_ID_PF), fm10k_device_pf },
	/* required last entry */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, fm10k_pci_tbl);
44
Alexander Duyck04a5aef2014-09-20 19:46:45 -040045u16 fm10k_read_pci_cfg_word(struct fm10k_hw *hw, u32 reg)
46{
47 struct fm10k_intfc *interface = hw->back;
48 u16 value = 0;
49
50 if (FM10K_REMOVED(hw->hw_addr))
51 return ~value;
52
53 pci_read_config_word(interface->pdev, reg, &value);
54 if (value == 0xFFFF)
55 fm10k_write_flush(hw);
56
57 return value;
58}
59
/* Read a 32-bit device register, detecting PCIe surprise removal.
 * Returns all-ones when the device is gone; on detecting loss of the
 * link it clears hw_addr and detaches the netdev.
 */
u32 fm10k_read_reg(struct fm10k_hw *hw, int reg)
{
	/* snapshot hw_addr once so a concurrent detach cannot change it
	 * between the removal check and the readl below
	 */
	u32 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
	u32 value = 0;

	if (FM10K_REMOVED(hw_addr))
		return ~value;

	value = readl(&hw_addr[reg]);
	/* an all-ones result may mean the PCIe link dropped; confirm by
	 * re-reading register 0 (unless that is what we just read)
	 */
	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
		struct fm10k_intfc *interface = hw->back;
		struct net_device *netdev = interface->netdev;

		/* mark the device removed and detach the netdev */
		hw->hw_addr = NULL;
		netif_device_detach(netdev);
		netdev_err(netdev, "PCIe link lost, device now detached\n");
	}

	return value;
}
80
Alexander Duyck0e7b3642014-09-20 19:48:10 -040081static int fm10k_hw_ready(struct fm10k_intfc *interface)
82{
83 struct fm10k_hw *hw = &interface->hw;
84
85 fm10k_write_flush(hw);
86
87 return FM10K_REMOVED(hw->hw_addr) ? -ENODEV : 0;
88}
89
Alexander Duyckb7d85142014-09-20 19:49:25 -040090void fm10k_service_event_schedule(struct fm10k_intfc *interface)
91{
92 if (!test_bit(__FM10K_SERVICE_DISABLE, &interface->state) &&
93 !test_and_set_bit(__FM10K_SERVICE_SCHED, &interface->state))
94 schedule_work(&interface->service_task);
95}
96
/* Release the service-task "scheduled" bit so the next event can queue */
static void fm10k_service_event_complete(struct fm10k_intfc *interface)
{
	BUG_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__FM10K_SERVICE_SCHED, &interface->state);
}
105
/**
 * fm10k_service_timer - Timer Call-back
 * @data: pointer to interface cast into an unsigned long
 *
 * Re-arms itself to fire every 2 seconds and kicks the service task.
 **/
static void fm10k_service_timer(unsigned long data)
{
	struct fm10k_intfc *interface = (struct fm10k_intfc *)data;

	/* Reset the timer */
	mod_timer(&interface->service_timer, (HZ * 2) + jiffies);

	fm10k_service_event_schedule(interface);
}
119
120static void fm10k_detach_subtask(struct fm10k_intfc *interface)
121{
122 struct net_device *netdev = interface->netdev;
123
124 /* do nothing if device is still present or hw_addr is set */
125 if (netif_device_present(netdev) || interface->hw.hw_addr)
126 return;
127
128 rtnl_lock();
129
130 if (netif_running(netdev))
131 dev_close(netdev);
132
133 rtnl_unlock();
134}
135
/* Tear down and bring back up the interface after a reset request.
 * Must not be called from interrupt context; serialized against other
 * resets via the __FM10K_RESETTING state bit.
 */
static void fm10k_reinit(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	WARN_ON(in_interrupt());

	/* put off any impending NetWatchDogTimeout */
	netdev->trans_start = jiffies;

	/* wait for any reset already in progress to finish */
	while (test_and_set_bit(__FM10K_RESETTING, &interface->state))
		usleep_range(1000, 2000);

	rtnl_lock();

	if (netif_running(netdev))
		fm10k_close(netdev);

	fm10k_mbx_free_irq(interface);

	/* delay any future reset requests */
	interface->last_reset = jiffies + (10 * HZ);

	/* reset and initialize the hardware so it is in a known state;
	 * init_hw only runs if reset_hw returned 0
	 */
	err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw);
	if (err)
		dev_err(&interface->pdev->dev, "init_hw failed: %d\n", err);

	/* reassociate interrupts */
	fm10k_mbx_request_irq(interface);

	if (netif_running(netdev))
		fm10k_open(netdev);

	rtnl_unlock();

	clear_bit(__FM10K_RESETTING, &interface->state);
}
175
176static void fm10k_reset_subtask(struct fm10k_intfc *interface)
177{
178 if (!(interface->flags & FM10K_FLAG_RESET_REQUESTED))
179 return;
180
181 interface->flags &= ~FM10K_FLAG_RESET_REQUESTED;
182
183 netdev_err(interface->netdev, "Reset interface\n");
184 interface->tx_timeout_count++;
185
186 fm10k_reinit(interface);
187}
188
189/**
190 * fm10k_configure_swpri_map - Configure Receive SWPRI to PC mapping
191 * @interface: board private structure
192 *
193 * Configure the SWPRI to PC mapping for the port.
194 **/
195static void fm10k_configure_swpri_map(struct fm10k_intfc *interface)
196{
197 struct net_device *netdev = interface->netdev;
198 struct fm10k_hw *hw = &interface->hw;
199 int i;
200
201 /* clear flag indicating update is needed */
202 interface->flags &= ~FM10K_FLAG_SWPRI_CONFIG;
203
204 /* these registers are only available on the PF */
205 if (hw->mac.type != fm10k_mac_pf)
206 return;
207
208 /* configure SWPRI to PC map */
209 for (i = 0; i < FM10K_SWPRI_MAX; i++)
210 fm10k_write_reg(hw, FM10K_SWPRI_MAP(i),
211 netdev_get_prio_tc_map(netdev, i));
212}
213
/**
 * fm10k_watchdog_update_host_state - Update the link status based on host.
 * @interface: board private structure
 *
 * Queries the switch manager over the mailbox for host readiness. While a
 * link-down event is pending the host is forced not-ready; repeated query
 * failures past the reset hold-off time request an interface reset.
 **/
static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	s32 err;

	/* hold host not-ready until the link-down event time expires */
	if (test_bit(__FM10K_LINK_DOWN, &interface->state)) {
		interface->host_ready = false;
		if (time_is_after_jiffies(interface->link_down_event))
			return;
		clear_bit(__FM10K_LINK_DOWN, &interface->state);
	}

	/* apply a pending SWPRI map update; needs rtnl, so only try —
	 * the flag stays set and we retry on the next pass if contended
	 */
	if (interface->flags & FM10K_FLAG_SWPRI_CONFIG) {
		if (rtnl_trylock()) {
			fm10k_configure_swpri_map(interface);
			rtnl_unlock();
		}
	}

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

	err = hw->mac.ops.get_host_state(hw, &interface->host_ready);
	/* failures past the hold-off window trigger a reset request */
	if (err && time_is_before_jiffies(interface->last_reset))
		interface->flags |= FM10K_FLAG_RESET_REQUESTED;

	/* free the lock */
	fm10k_mbx_unlock(interface);
}
247
/**
 * fm10k_mbx_subtask - Process upstream and downstream mailboxes
 * @interface: board private structure
 *
 * This function will process both the upstream and downstream mailboxes.
 * It is necessary for us to hold the rtnl_lock while doing this as the
 * mailbox accesses are protected by this lock.
 **/
static void fm10k_mbx_subtask(struct fm10k_intfc *interface)
{
	/* process upstream mailbox and update device state */
	fm10k_watchdog_update_host_state(interface);
}
261
262/**
263 * fm10k_watchdog_host_is_ready - Update netdev status based on host ready
264 * @interface: board private structure
265 **/
266static void fm10k_watchdog_host_is_ready(struct fm10k_intfc *interface)
267{
268 struct net_device *netdev = interface->netdev;
269
270 /* only continue if link state is currently down */
271 if (netif_carrier_ok(netdev))
272 return;
273
274 netif_info(interface, drv, netdev, "NIC Link is up\n");
275
276 netif_carrier_on(netdev);
277 netif_tx_wake_all_queues(netdev);
278}
279
280/**
281 * fm10k_watchdog_host_not_ready - Update netdev status based on host not ready
282 * @interface: board private structure
283 **/
284static void fm10k_watchdog_host_not_ready(struct fm10k_intfc *interface)
285{
286 struct net_device *netdev = interface->netdev;
287
288 /* only continue if link state is currently up */
289 if (!netif_carrier_ok(netdev))
290 return;
291
292 netif_info(interface, drv, netdev, "NIC Link is down\n");
293
294 netif_carrier_off(netdev);
295 netif_tx_stop_all_queues(netdev);
296}
297
/**
 * fm10k_update_stats - Update the board statistics counters.
 * @interface: board private structure
 *
 * Aggregates per-ring software stats into the interface and netdev stat
 * structures and pulls hardware counters from the MAC. Rate-limited to
 * once per second when driven from the service task.
 **/
void fm10k_update_stats(struct fm10k_intfc *interface)
{
	struct net_device_stats *net_stats = &interface->netdev->stats;
	struct fm10k_hw *hw = &interface->hw;
	u64 rx_errors = 0, rx_csum_errors = 0, tx_csum_errors = 0;
	u64 restart_queue = 0, tx_busy = 0, alloc_failed = 0;
	u64 rx_bytes_nic = 0, rx_pkts_nic = 0, rx_drops_nic = 0;
	u64 tx_bytes_nic = 0, tx_pkts_nic = 0;
	u64 bytes, pkts;
	int i;

	/* do not allow stats update via service task for next second */
	interface->next_stats_update = jiffies + HZ;

	/* gather some stats to the interface struct that are per queue */
	for (bytes = 0, pkts = 0, i = 0; i < interface->num_tx_queues; i++) {
		struct fm10k_ring *tx_ring = interface->tx_ring[i];

		restart_queue += tx_ring->tx_stats.restart_queue;
		tx_busy += tx_ring->tx_stats.tx_busy;
		tx_csum_errors += tx_ring->tx_stats.csum_err;
		bytes += tx_ring->stats.bytes;
		pkts += tx_ring->stats.packets;
	}

	interface->restart_queue = restart_queue;
	interface->tx_busy = tx_busy;
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = pkts;
	interface->tx_csum_errors = tx_csum_errors;
	/* gather some stats to the interface struct that are per queue */
	for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) {
		struct fm10k_ring *rx_ring = interface->rx_ring[i];

		bytes += rx_ring->stats.bytes;
		pkts += rx_ring->stats.packets;
		alloc_failed += rx_ring->rx_stats.alloc_failed;
		rx_csum_errors += rx_ring->rx_stats.csum_err;
		rx_errors += rx_ring->rx_stats.errors;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = pkts;
	interface->alloc_failed = alloc_failed;
	interface->rx_csum_errors = rx_csum_errors;
	interface->rx_errors = rx_errors;

	/* pull the hardware counters from the MAC */
	hw->mac.ops.update_hw_stats(hw, &interface->stats);

	/* NOTE(review): walks all FM10K_MAX_QUEUES_PF entries rather than
	 * only the queues in use — assumes stats.q[] is sized for the max
	 * and unused entries count zero; confirm
	 */
	for (i = 0; i < FM10K_MAX_QUEUES_PF; i++) {
		struct fm10k_hw_stats_q *q = &interface->stats.q[i];

		tx_bytes_nic += q->tx_bytes.count;
		tx_pkts_nic += q->tx_packets.count;
		rx_bytes_nic += q->rx_bytes.count;
		rx_pkts_nic += q->rx_packets.count;
		rx_drops_nic += q->rx_drops.count;
	}

	interface->tx_bytes_nic = tx_bytes_nic;
	interface->tx_packets_nic = tx_pkts_nic;
	interface->rx_bytes_nic = rx_bytes_nic;
	interface->rx_packets_nic = rx_pkts_nic;
	interface->rx_drops_nic = rx_drops_nic;

	/* Fill out the OS statistics structure */
	net_stats->rx_errors = interface->stats.xec.count;
	net_stats->rx_dropped = interface->stats.nodesc_drop.count;
}
371
372/**
373 * fm10k_watchdog_flush_tx - flush queues on host not ready
374 * @interface - pointer to the device interface structure
375 **/
376static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface)
377{
378 int some_tx_pending = 0;
379 int i;
380
381 /* nothing to do if carrier is up */
382 if (netif_carrier_ok(interface->netdev))
383 return;
384
385 for (i = 0; i < interface->num_tx_queues; i++) {
386 struct fm10k_ring *tx_ring = interface->tx_ring[i];
387
388 if (tx_ring->next_to_use != tx_ring->next_to_clean) {
389 some_tx_pending = 1;
390 break;
391 }
392 }
393
394 /* We've lost link, so the controller stops DMA, but we've got
395 * queued Tx work that's never going to get done, so reset
396 * controller to flush Tx.
397 */
398 if (some_tx_pending)
399 interface->flags |= FM10K_FLAG_RESET_REQUESTED;
400}
401
402/**
403 * fm10k_watchdog_subtask - check and bring link up
404 * @interface - pointer to the device interface structure
405 **/
406static void fm10k_watchdog_subtask(struct fm10k_intfc *interface)
407{
408 /* if interface is down do nothing */
409 if (test_bit(__FM10K_DOWN, &interface->state) ||
410 test_bit(__FM10K_RESETTING, &interface->state))
411 return;
412
413 if (interface->host_ready)
414 fm10k_watchdog_host_is_ready(interface);
415 else
416 fm10k_watchdog_host_not_ready(interface);
417
418 /* update stats only once every second */
419 if (time_is_before_jiffies(interface->next_stats_update))
420 fm10k_update_stats(interface);
421
422 /* flush any uncompleted work */
423 fm10k_watchdog_flush_tx(interface);
424}
425
/**
 * fm10k_check_hang_subtask - check for hung queues and dropped interrupts
 * @interface - pointer to the device interface structure
 *
 * This function serves two purposes. First it strobes the interrupt lines
 * in order to make certain interrupts are occurring. Secondly it sets the
 * bits needed to check for TX hangs. As a result we should immediately
 * determine if a hang has occurred.
 */
static void fm10k_check_hang_subtask(struct fm10k_intfc *interface)
{
	int i;

	/* If we're down or resetting, just bail */
	if (test_bit(__FM10K_DOWN, &interface->state) ||
	    test_bit(__FM10K_RESETTING, &interface->state))
		return;

	/* rate limit tx hang checks to only once every 2 seconds */
	if (time_is_after_eq_jiffies(interface->next_tx_hang_check))
		return;
	interface->next_tx_hang_check = jiffies + (2 * HZ);

	if (netif_carrier_ok(interface->netdev)) {
		/* Force detection of hung controller */
		for (i = 0; i < interface->num_tx_queues; i++)
			set_check_for_tx_hang(interface->tx_ring[i]);

		/* Rearm all in-use q_vectors for immediate firing */
		for (i = 0; i < interface->num_q_vectors; i++) {
			struct fm10k_q_vector *qv = interface->q_vector[i];

			/* skip vectors with no rings attached */
			if (!qv->tx.count && !qv->rx.count)
				continue;
			/* strobe the ITR register to fire the vector now */
			writel(FM10K_ITR_ENABLE | FM10K_ITR_PENDING2, qv->itr);
		}
	}
}
464
/**
 * fm10k_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 *
 * Runs the periodic maintenance subtasks in a fixed order, then clears
 * the scheduled bit so the next service event can be queued.
 **/
static void fm10k_service_task(struct work_struct *work)
{
	struct fm10k_intfc *interface;

	interface = container_of(work, struct fm10k_intfc, service_task);

	/* tasks always capable of running, but must be rtnl protected */
	fm10k_mbx_subtask(interface);
	fm10k_detach_subtask(interface);
	fm10k_reset_subtask(interface);

	/* tasks only run when interface is up */
	fm10k_watchdog_subtask(interface);
	fm10k_check_hang_subtask(interface);

	/* release lock on service events to allow scheduling next event */
	fm10k_service_event_complete(interface);
}
487
/**
 * fm10k_configure_tx_ring - Configure Tx ring after Reset
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset: the queue is disabled
 * first, then base/length/head/tail are programmed, the interrupt vector
 * is mapped, and finally the queue is re-enabled.
 **/
static void fm10k_configure_tx_ring(struct fm10k_intfc *interface,
				    struct fm10k_ring *ring)
{
	struct fm10k_hw *hw = &interface->hw;
	u64 tdba = ring->dma;
	u32 size = ring->count * sizeof(struct fm10k_tx_desc);
	u32 txint = FM10K_INT_MAP_DISABLE;
	u32 txdctl = FM10K_TXDCTL_ENABLE | (1 << FM10K_TXDCTL_MAX_TIME_SHIFT);
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), 0);
	fm10k_write_flush(hw);

	/* possible poll here to verify ring resources have been cleaned */

	/* set location and size for descriptor ring */
	fm10k_write_reg(hw, FM10K_TDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	fm10k_write_reg(hw, FM10K_TDBAH(reg_idx), tdba >> 32);
	fm10k_write_reg(hw, FM10K_TDLEN(reg_idx), size);

	/* reset head and tail pointers */
	fm10k_write_reg(hw, FM10K_TDH(reg_idx), 0);
	fm10k_write_reg(hw, FM10K_TDT(reg_idx), 0);

	/* store tail pointer */
	ring->tail = &interface->uc_addr[FM10K_TDT(reg_idx)];

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* Map interrupt; left disabled if no q_vector is attached */
	if (ring->q_vector) {
		txint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
		txint |= FM10K_INT_MAP_TIMER0;
	}

	fm10k_write_reg(hw, FM10K_TXINT(reg_idx), txint);

	/* enable use of FTAG bit in Tx descriptor, register is RO for VF */
	fm10k_write_reg(hw, FM10K_PFVTCTL(reg_idx),
			FM10K_PFVTCTL_FTAG_DESC_ENABLE);

	/* enable queue */
	fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), txdctl);
}
542
543/**
544 * fm10k_enable_tx_ring - Verify Tx ring is enabled after configuration
545 * @interface: board private structure
546 * @ring: structure containing ring specific data
547 *
548 * Verify the Tx descriptor ring is ready for transmit.
549 **/
550static void fm10k_enable_tx_ring(struct fm10k_intfc *interface,
551 struct fm10k_ring *ring)
552{
553 struct fm10k_hw *hw = &interface->hw;
554 int wait_loop = 10;
555 u32 txdctl;
556 u8 reg_idx = ring->reg_idx;
557
558 /* if we are already enabled just exit */
559 if (fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx)) & FM10K_TXDCTL_ENABLE)
560 return;
561
562 /* poll to verify queue is enabled */
563 do {
564 usleep_range(1000, 2000);
565 txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx));
566 } while (!(txdctl & FM10K_TXDCTL_ENABLE) && --wait_loop);
567 if (!wait_loop)
568 netif_err(interface, drv, interface->netdev,
569 "Could not enable Tx Queue %d\n", reg_idx);
570}
571
572/**
573 * fm10k_configure_tx - Configure Transmit Unit after Reset
574 * @interface: board private structure
575 *
576 * Configure the Tx unit of the MAC after a reset.
577 **/
578static void fm10k_configure_tx(struct fm10k_intfc *interface)
579{
580 int i;
581
582 /* Setup the HW Tx Head and Tail descriptor pointers */
583 for (i = 0; i < interface->num_tx_queues; i++)
584 fm10k_configure_tx_ring(interface, interface->tx_ring[i]);
585
586 /* poll here to verify that Tx rings are now enabled */
587 for (i = 0; i < interface->num_tx_queues; i++)
588 fm10k_enable_tx_ring(interface, interface->tx_ring[i]);
589}
590
/**
 * fm10k_configure_rx_ring - Configure Rx ring after Reset
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Rx descriptor ring after a reset: the queue is disabled
 * first, then base/length/head/tail, buffer sizing, drop-on-empty and
 * the interrupt mapping are programmed before re-enabling the queue.
 **/
static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
				    struct fm10k_ring *ring)
{
	u64 rdba = ring->dma;
	struct fm10k_hw *hw = &interface->hw;
	u32 size = ring->count * sizeof(union fm10k_rx_desc);
	u32 rxqctl = FM10K_RXQCTL_ENABLE | FM10K_RXQCTL_PF;
	u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	u32 srrctl = FM10K_SRRCTL_BUFFER_CHAINING_EN;
	u32 rxint = FM10K_INT_MAP_DISABLE;
	u8 rx_pause = interface->rx_pause;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), 0);
	fm10k_write_flush(hw);

	/* possible poll here to verify ring resources have been cleaned */

	/* set location and size for descriptor ring */
	fm10k_write_reg(hw, FM10K_RDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	fm10k_write_reg(hw, FM10K_RDBAH(reg_idx), rdba >> 32);
	fm10k_write_reg(hw, FM10K_RDLEN(reg_idx), size);

	/* reset head and tail pointers */
	fm10k_write_reg(hw, FM10K_RDH(reg_idx), 0);
	fm10k_write_reg(hw, FM10K_RDT(reg_idx), 0);

	/* store tail pointer */
	ring->tail = &interface->uc_addr[FM10K_RDT(reg_idx)];

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->next_to_alloc = 0;

	/* Configure the Rx buffer size for one buff without split */
	srrctl |= FM10K_RX_BUFSZ >> FM10K_SRRCTL_BSIZEPKT_SHIFT;

	/* Configure the Rx ring to suppress loopback packets */
	srrctl |= FM10K_SRRCTL_LOOPBACK_SUPPRESS;
	fm10k_write_reg(hw, FM10K_SRRCTL(reg_idx), srrctl);

	/* Enable drop on empty when pause is off for this ring's PC */
#if defined(HAVE_DCBNL_IEEE) && defined(CONFIG_DCB)
	if (interface->pfc_en)
		rx_pause = interface->pfc_en;
#endif
	if (!(rx_pause & (1 << ring->qos_pc)))
		rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

	fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);

	/* assign default VLAN to queue */
	ring->vid = hw->mac.default_vid;

	/* Map interrupt; left disabled if no q_vector is attached */
	if (ring->q_vector) {
		rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
		rxint |= FM10K_INT_MAP_TIMER1;
	}

	fm10k_write_reg(hw, FM10K_RXINT(reg_idx), rxint);

	/* enable queue */
	fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl);
}
665
666/**
667 * fm10k_update_rx_drop_en - Configures the drop enable bits for Rx rings
668 * @interface: board private structure
669 *
670 * Configure the drop enable bits for the Rx rings.
671 **/
672void fm10k_update_rx_drop_en(struct fm10k_intfc *interface)
673{
674 struct fm10k_hw *hw = &interface->hw;
675 u8 rx_pause = interface->rx_pause;
676 int i;
677
678#if defined(HAVE_DCBNL_IEEE) && defined(CONFIG_DCB)
679 if (interface->pfc_en)
680 rx_pause = interface->pfc_en;
681
682#endif
683 for (i = 0; i < interface->num_rx_queues; i++) {
684 struct fm10k_ring *ring = interface->rx_ring[i];
685 u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
686 u8 reg_idx = ring->reg_idx;
687
688 if (!(rx_pause & (1 << ring->qos_pc)))
689 rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
690
691 fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);
692 }
693}
694
/**
 * fm10k_configure_dglort - Configure Receive DGLORT after reset
 * @interface: board private structure
 *
 * Configure the DGLORT description and RSS tables: seeds the RSS hash
 * key and RETA, selects the hash fields, then programs the DGLORT maps
 * (queue-mapped testing map when enough GLORTs exist, plus the main
 * RSS/DCB map for this interface).
 **/
static void fm10k_configure_dglort(struct fm10k_intfc *interface)
{
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int i;
	u32 mrqc;

	/* Fill out hash function seeds */
	for (i = 0; i < FM10K_RSSRK_SIZE; i++)
		fm10k_write_reg(hw, FM10K_RSSRK(0, i), interface->rssrk[i]);

	/* Write RETA table to hardware */
	for (i = 0; i < FM10K_RETA_SIZE; i++)
		fm10k_write_reg(hw, FM10K_RETA(0, i), interface->reta[i]);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = FM10K_MRQC_IPV4 |
	       FM10K_MRQC_TCP_IPV4 |
	       FM10K_MRQC_IPV6 |
	       FM10K_MRQC_TCP_IPV6;

	/* UDP hashing is opt-in via interface flags */
	if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP)
		mrqc |= FM10K_MRQC_UDP_IPV4;
	if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP)
		mrqc |= FM10K_MRQC_UDP_IPV6;

	fm10k_write_reg(hw, FM10K_MRQC(0), mrqc);

	/* configure default DGLORT mapping for RSS/DCB */
	dglort.inner_rss = 1;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign GLORT per queue for queue mapped testing */
	if (interface->glort_count > 64) {
		memset(&dglort, 0, sizeof(dglort));
		dglort.inner_rss = 1;
		dglort.glort = interface->glort + 64;
		dglort.idx = fm10k_dglort_pf_queue;
		dglort.queue_l = fls(interface->num_rx_queues - 1);
		hw->mac.ops.configure_dglort_map(hw, &dglort);
	}

	/* assign glort value for RSS/DCB specific to this interface */
	memset(&dglort, 0, sizeof(dglort));
	dglort.inner_rss = 1;
	dglort.glort = interface->glort;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
	/* configure DGLORT mapping for RSS/DCB */
	dglort.idx = fm10k_dglort_pf_rss;
	hw->mac.ops.configure_dglort_map(hw, &dglort);
}
757
758/**
759 * fm10k_configure_rx - Configure Receive Unit after Reset
760 * @interface: board private structure
761 *
762 * Configure the Rx unit of the MAC after a reset.
763 **/
764static void fm10k_configure_rx(struct fm10k_intfc *interface)
765{
766 int i;
767
768 /* Configure SWPRI to PC map */
769 fm10k_configure_swpri_map(interface);
770
771 /* Configure RSS and DGLORT map */
772 fm10k_configure_dglort(interface);
773
774 /* Setup the HW Rx Head and Tail descriptor pointers */
775 for (i = 0; i < interface->num_rx_queues; i++)
776 fm10k_configure_rx_ring(interface, interface->rx_ring[i]);
777
778 /* possible poll here to verify that Rx rings are now enabled */
779}
780
Alexander Duyck18283ca2014-09-20 19:48:51 -0400781static void fm10k_napi_enable_all(struct fm10k_intfc *interface)
782{
783 struct fm10k_q_vector *q_vector;
784 int q_idx;
785
786 for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) {
787 q_vector = interface->q_vector[q_idx];
788 napi_enable(&q_vector->napi);
789 }
790}
791
792static irqreturn_t fm10k_msix_clean_rings(int irq, void *data)
793{
794 struct fm10k_q_vector *q_vector = data;
795
796 if (q_vector->rx.count || q_vector->tx.count)
797 napi_schedule(&q_vector->napi);
798
799 return IRQ_HANDLED;
800}
801
/* expands to a switch case mapping a fault enum to its stringized name */
#define FM10K_ERR_MSG(type) case (type): error = #type; break
/* Log a decoded hardware fault record: the fault type name, faulting
 * address, specific info, and originating PCI function.
 */
static void fm10k_print_fault(struct fm10k_intfc *interface, int type,
			      struct fm10k_fault *fault)
{
	struct pci_dev *pdev = interface->pdev;
	char *error;

	switch (type) {
	case FM10K_PCA_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown PCA error";
			break;
		FM10K_ERR_MSG(PCA_NO_FAULT);
		FM10K_ERR_MSG(PCA_UNMAPPED_ADDR);
		FM10K_ERR_MSG(PCA_BAD_QACCESS_PF);
		FM10K_ERR_MSG(PCA_BAD_QACCESS_VF);
		FM10K_ERR_MSG(PCA_MALICIOUS_REQ);
		FM10K_ERR_MSG(PCA_POISONED_TLP);
		FM10K_ERR_MSG(PCA_TLP_ABORT);
		}
		break;
	case FM10K_THI_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown THI error";
			break;
		FM10K_ERR_MSG(THI_NO_FAULT);
		FM10K_ERR_MSG(THI_MAL_DIS_Q_FAULT);
		}
		break;
	case FM10K_FUM_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown FUM error";
			break;
		FM10K_ERR_MSG(FUM_NO_FAULT);
		FM10K_ERR_MSG(FUM_UNMAPPED_ADDR);
		FM10K_ERR_MSG(FUM_BAD_VF_QACCESS);
		FM10K_ERR_MSG(FUM_ADD_DECODE_ERR);
		FM10K_ERR_MSG(FUM_RO_ERROR);
		FM10K_ERR_MSG(FUM_QPRC_CRC_ERROR);
		FM10K_ERR_MSG(FUM_CSR_TIMEOUT);
		FM10K_ERR_MSG(FUM_INVALID_TYPE);
		FM10K_ERR_MSG(FUM_INVALID_LENGTH);
		FM10K_ERR_MSG(FUM_INVALID_BE);
		FM10K_ERR_MSG(FUM_INVALID_ALIGN);
		}
		break;
	default:
		error = "Undocumented fault";
		break;
	}

	dev_warn(&pdev->dev,
		 "%s Address: 0x%llx SpecInfo: 0x%x Func: %02x.%0x\n",
		 error, fault->address, fault->specinfo,
		 PCI_SLOT(fault->func), PCI_FUNC(fault->func));
}
861
/* Walk the fault-cause bits in EICR, fetching and logging each fault */
static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_fault fault = { 0 };
	int type, err;

	/* one EICR bit per fault source, starting at FM10K_PCA_FAULT;
	 * each successive bit maps to the next fault register block
	 */
	for (eicr &= FM10K_EICR_FAULT_MASK, type = FM10K_PCA_FAULT;
	     eicr;
	     eicr >>= 1, type += FM10K_FAULT_SIZE) {
		/* only check if there is an error reported */
		if (!(eicr & 0x1))
			continue;

		/* retrieve fault info */
		err = hw->mac.ops.get_fault(hw, type, &fault);
		if (err) {
			dev_err(&interface->pdev->dev,
				"error reading fault\n");
			continue;
		}

		fm10k_print_fault(interface, type, &fault);
	}
}
886
887static void fm10k_reset_drop_on_empty(struct fm10k_intfc *interface, u32 eicr)
888{
889 struct fm10k_hw *hw = &interface->hw;
890 const u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
891 u32 maxholdq;
892 int q;
893
894 if (!(eicr & FM10K_EICR_MAXHOLDTIME))
895 return;
896
897 maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(7));
898 if (maxholdq)
899 fm10k_write_reg(hw, FM10K_MAXHOLDQ(7), maxholdq);
900 for (q = 255;;) {
901 if (maxholdq & (1 << 31)) {
902 if (q < FM10K_MAX_QUEUES_PF) {
903 interface->rx_overrun_pf++;
904 fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl);
905 } else {
906 interface->rx_overrun_vf++;
907 }
908 }
909
910 maxholdq *= 2;
911 if (!maxholdq)
912 q &= ~(32 - 1);
913
914 if (!q)
915 break;
916
917 if (q-- % 32)
918 continue;
919
920 maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(q / 32));
921 if (maxholdq)
922 fm10k_write_reg(hw, FM10K_MAXHOLDQ(q / 32), maxholdq);
923 }
924}
925
/* PF mailbox/other-cause MSI-X handler: acknowledge the causes, report
 * faults, recover overrun queues, service the mailbox, and schedule the
 * service task to revalidate host state.
 */
static irqreturn_t fm10k_msix_mbx_pf(int irq, void *data)
{
	struct fm10k_intfc *interface = data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 eicr;

	/* unmask any set bits related to this interrupt */
	eicr = fm10k_read_reg(hw, FM10K_EICR);
	fm10k_write_reg(hw, FM10K_EICR, eicr & (FM10K_EICR_MAILBOX |
						FM10K_EICR_SWITCHREADY |
						FM10K_EICR_SWITCHNOTREADY));

	/* report any faults found to the message log */
	fm10k_report_fault(interface, eicr);

	/* reset any queues disabled due to receiver overrun */
	fm10k_reset_drop_on_empty(interface, eicr);

	/* service mailboxes; skip this pass if the lock is contended */
	if (fm10k_mbx_trylock(interface)) {
		mbx->ops.process(hw, mbx);
		fm10k_mbx_unlock(interface);
	}

	/* if switch toggled state we should reset GLORTs */
	if (eicr & FM10K_EICR_SWITCHNOTREADY) {
		/* force link down for at least 4 seconds */
		interface->link_down_event = jiffies + (4 * HZ);
		set_bit(__FM10K_LINK_DOWN, &interface->state);

		/* reset dglort_map back to no config */
		hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
	}

	/* we should validate host state after interrupt event */
	hw->mac.get_host_state = 1;
	fm10k_service_event_schedule(interface);

	/* re-enable mailbox interrupt and indicate 20us delay */
	fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR),
			FM10K_ITR_ENABLE | FM10K_MBX_INT_DELAY);

	return IRQ_HANDLED;
}
971
972void fm10k_mbx_free_irq(struct fm10k_intfc *interface)
973{
974 struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
975 struct fm10k_hw *hw = &interface->hw;
976 int itr_reg;
977
978 /* disconnect the mailbox */
979 hw->mbx.ops.disconnect(hw, &hw->mbx);
980
981 /* disable Mailbox cause */
982 if (hw->mac.type == fm10k_mac_pf) {
983 fm10k_write_reg(hw, FM10K_EIMR,
984 FM10K_EIMR_DISABLE(PCA_FAULT) |
985 FM10K_EIMR_DISABLE(FUM_FAULT) |
986 FM10K_EIMR_DISABLE(MAILBOX) |
987 FM10K_EIMR_DISABLE(SWITCHREADY) |
988 FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
989 FM10K_EIMR_DISABLE(SRAMERROR) |
990 FM10K_EIMR_DISABLE(VFLR) |
991 FM10K_EIMR_DISABLE(MAXHOLDTIME));
992 itr_reg = FM10K_ITR(FM10K_MBX_VECTOR);
993 }
994
995 fm10k_write_reg(hw, itr_reg, FM10K_ITR_MASK_SET);
996
997 free_irq(entry->vector, interface);
998}
999
1000/* generic error handler for mailbox issues */
1001static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results,
1002 struct fm10k_mbx_info *mbx)
1003{
1004 struct fm10k_intfc *interface;
1005 struct pci_dev *pdev;
1006
1007 interface = container_of(hw, struct fm10k_intfc, hw);
1008 pdev = interface->pdev;
1009
1010 dev_err(&pdev->dev, "Unknown message ID %u\n",
1011 **results & FM10K_TLV_ID_MASK);
1012
1013 return 0;
1014}
1015
1016static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results,
1017 struct fm10k_mbx_info *mbx)
1018{
1019 struct fm10k_intfc *interface;
1020 u32 dglort_map = hw->mac.dglort_map;
1021 s32 err;
1022
1023 err = fm10k_msg_lport_map_pf(hw, results, mbx);
1024 if (err)
1025 return err;
1026
1027 interface = container_of(hw, struct fm10k_intfc, hw);
1028
1029 /* we need to reset if port count was just updated */
1030 if (dglort_map != hw->mac.dglort_map)
1031 interface->flags |= FM10K_FLAG_RESET_REQUESTED;
1032
1033 return 0;
1034}
1035
1036static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results,
1037 struct fm10k_mbx_info *mbx)
1038{
1039 struct fm10k_intfc *interface;
1040 u16 glort, pvid;
1041 u32 pvid_update;
1042 s32 err;
1043
1044 err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID],
1045 &pvid_update);
1046 if (err)
1047 return err;
1048
1049 /* extract values from the pvid update */
1050 glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT);
1051 pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID);
1052
1053 /* if glort is not valid return error */
1054 if (!fm10k_glort_valid_pf(hw, glort))
1055 return FM10K_ERR_PARAM;
1056
1057 /* verify VID is valid */
1058 if (pvid >= FM10K_VLAN_TABLE_VID_MAX)
1059 return FM10K_ERR_PARAM;
1060
1061 interface = container_of(hw, struct fm10k_intfc, hw);
1062
1063 /* we need to reset if default VLAN was just updated */
1064 if (pvid != hw->mac.default_vid)
1065 interface->flags |= FM10K_FLAG_RESET_REQUESTED;
1066
1067 hw->mac.default_vid = pvid;
1068
1069 return 0;
1070}
1071
/* PF mailbox message handler table; fm10k_mbx_error logs any message ID
 * that has no dedicated handler
 */
static const struct fm10k_msg_data pf_mbx_data[] = {
	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_lport_map),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_update_pvid),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
};
1081
/**
 * fm10k_mbx_request_irq_pf - register and enable the PF mailbox interrupt
 * @interface: board private structure
 *
 * Registers the PF mailbox handlers, requests the mailbox MSI-X vector,
 * maps the "other" interrupt causes and the mailbox onto that vector,
 * then unmasks the causes and the ITR.
 *
 * Returns 0 on success or a negative error code from handler registration
 * or request_irq().
 **/
static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface)
{
	struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
	struct net_device *dev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* Use timer0 for interrupt moderation on the mailbox */
	u32 mbx_itr = FM10K_INT_MAP_TIMER0 | entry->entry;
	u32 other_itr = FM10K_INT_MAP_IMMEDIATE | entry->entry;

	/* register mailbox handlers */
	err = hw->mbx.ops.register_handlers(&hw->mbx, pf_mbx_data);
	if (err)
		return err;

	/* request the IRQ */
	err = request_irq(entry->vector, fm10k_msix_mbx_pf, 0,
			  dev->name, interface);
	if (err) {
		netif_err(interface, probe, dev,
			  "request_irq for msix_mbx failed: %d\n", err);
		return err;
	}

	/* Enable interrupts w/ no moderation for "other" interrupts */
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_SRAM), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_MaxHoldTime), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_VFLR), other_itr);

	/* Enable interrupts w/ moderation for mailbox */
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_Mailbox), mbx_itr);

	/* Enable individual interrupt causes */
	fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
					FM10K_EIMR_ENABLE(FUM_FAULT) |
					FM10K_EIMR_ENABLE(MAILBOX) |
					FM10K_EIMR_ENABLE(SWITCHREADY) |
					FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
					FM10K_EIMR_ENABLE(SRAMERROR) |
					FM10K_EIMR_ENABLE(VFLR) |
					FM10K_EIMR_ENABLE(MAXHOLDTIME));

	/* enable interrupt */
	fm10k_write_reg(hw, FM10K_ITR(entry->entry), FM10K_ITR_ENABLE);

	return 0;
}
1132
1133int fm10k_mbx_request_irq(struct fm10k_intfc *interface)
1134{
1135 struct fm10k_hw *hw = &interface->hw;
1136 int err;
1137
1138 /* enable Mailbox cause */
1139 err = fm10k_mbx_request_irq_pf(interface);
1140
1141 /* connect mailbox */
1142 if (!err)
1143 err = hw->mbx.ops.connect(hw, &hw->mbx);
1144
1145 return err;
1146}
1147
1148/**
1149 * fm10k_qv_free_irq - release interrupts associated with queue vectors
1150 * @interface: board private structure
1151 *
1152 * Release all interrupts associated with this interface
1153 **/
1154void fm10k_qv_free_irq(struct fm10k_intfc *interface)
1155{
1156 int vector = interface->num_q_vectors;
1157 struct fm10k_hw *hw = &interface->hw;
1158 struct msix_entry *entry;
1159
1160 entry = &interface->msix_entries[NON_Q_VECTORS(hw) + vector];
1161
1162 while (vector) {
1163 struct fm10k_q_vector *q_vector;
1164
1165 vector--;
1166 entry--;
1167 q_vector = interface->q_vector[vector];
1168
1169 if (!q_vector->tx.count && !q_vector->rx.count)
1170 continue;
1171
1172 /* disable interrupts */
1173
1174 writel(FM10K_ITR_MASK_SET, q_vector->itr);
1175
1176 free_irq(entry->vector, q_vector);
1177 }
1178}
1179
1180/**
1181 * fm10k_qv_request_irq - initialize interrupts for queue vectors
1182 * @interface: board private structure
1183 *
1184 * Attempts to configure interrupts using the best available
1185 * capabilities of the hardware and kernel.
1186 **/
1187int fm10k_qv_request_irq(struct fm10k_intfc *interface)
1188{
1189 struct net_device *dev = interface->netdev;
1190 struct fm10k_hw *hw = &interface->hw;
1191 struct msix_entry *entry;
1192 int ri = 0, ti = 0;
1193 int vector, err;
1194
1195 entry = &interface->msix_entries[NON_Q_VECTORS(hw)];
1196
1197 for (vector = 0; vector < interface->num_q_vectors; vector++) {
1198 struct fm10k_q_vector *q_vector = interface->q_vector[vector];
1199
1200 /* name the vector */
1201 if (q_vector->tx.count && q_vector->rx.count) {
1202 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1203 "%s-TxRx-%d", dev->name, ri++);
1204 ti++;
1205 } else if (q_vector->rx.count) {
1206 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1207 "%s-rx-%d", dev->name, ri++);
1208 } else if (q_vector->tx.count) {
1209 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1210 "%s-tx-%d", dev->name, ti++);
1211 } else {
1212 /* skip this unused q_vector */
1213 continue;
1214 }
1215
1216 /* Assign ITR register to q_vector */
1217 q_vector->itr = &interface->uc_addr[FM10K_ITR(entry->entry)];
1218
1219 /* request the IRQ */
1220 err = request_irq(entry->vector, &fm10k_msix_clean_rings, 0,
1221 q_vector->name, q_vector);
1222 if (err) {
1223 netif_err(interface, probe, dev,
1224 "request_irq failed for MSIX interrupt Error: %d\n",
1225 err);
1226 goto err_out;
1227 }
1228
1229 /* Enable q_vector */
1230 writel(FM10K_ITR_ENABLE, q_vector->itr);
1231
1232 entry++;
1233 }
1234
1235 return 0;
1236
1237err_out:
1238 /* wind through the ring freeing all entries and vectors */
1239 while (vector) {
1240 struct fm10k_q_vector *q_vector;
1241
1242 entry--;
1243 vector--;
1244 q_vector = interface->q_vector[vector];
1245
1246 if (!q_vector->tx.count && !q_vector->rx.count)
1247 continue;
1248
1249 /* disable interrupts */
1250
1251 writel(FM10K_ITR_MASK_SET, q_vector->itr);
1252
1253 free_irq(entry->vector, q_vector);
1254 }
1255
1256 return err;
1257}
1258
/**
 * fm10k_up - bring the interface to an operational state
 * @interface: board private structure
 *
 * Starts the DMA engine, configures the Tx/Rx rings and interrupt
 * moderation, clears the down state, enables NAPI polling and Rx filters,
 * starts the transmit queues and kicks the service timer.  The ordering
 * below matters: rings and interrupts are ready before __FM10K_DOWN is
 * cleared and transmits are enabled.
 **/
void fm10k_up(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;

	/* Enable Tx/Rx DMA */
	hw->mac.ops.start_hw(hw);

	/* configure Tx descriptor rings */
	fm10k_configure_tx(interface);

	/* configure Rx descriptor rings */
	fm10k_configure_rx(interface);

	/* configure interrupts */
	hw->mac.ops.update_int_moderator(hw);

	/* clear down bit to indicate we are ready to go */
	clear_bit(__FM10K_DOWN, &interface->state);

	/* enable polling cleanups */
	fm10k_napi_enable_all(interface);

	/* re-establish Rx filters */
	fm10k_restore_rx_state(interface);

	/* enable transmits */
	netif_tx_start_all_queues(interface->netdev);

	/* kick off the service timer */
	mod_timer(&interface->service_timer, jiffies);
}
1290
Alexander Duyck18283ca2014-09-20 19:48:51 -04001291static void fm10k_napi_disable_all(struct fm10k_intfc *interface)
1292{
1293 struct fm10k_q_vector *q_vector;
1294 int q_idx;
1295
1296 for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) {
1297 q_vector = interface->q_vector[q_idx];
1298 napi_disable(&q_vector->napi);
1299 }
1300}
1301
/**
 * fm10k_down - quiesce the interface
 * @interface: board private structure
 *
 * Signals the down state, stops transmits, resets Rx filters, waits for
 * the device to quiesce, disables NAPI and the service timer, captures a
 * final set of stats, stops the DMA engine and cleans the Tx rings.  The
 * down bit is set first so the interrupt handler and service task stop
 * scheduling new work before the hardware is torn down.
 **/
void fm10k_down(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;

	/* signal that we are down to the interrupt handler and service task */
	set_bit(__FM10K_DOWN, &interface->state);

	/* call carrier off first to avoid false dev_watchdog timeouts */
	netif_carrier_off(netdev);

	/* disable transmits */
	netif_tx_stop_all_queues(netdev);
	netif_tx_disable(netdev);

	/* reset Rx filters */
	fm10k_reset_rx_state(interface);

	/* allow 10ms for device to quiesce */
	usleep_range(10000, 20000);

	/* disable polling routines */
	fm10k_napi_disable_all(interface);

	/* stop the periodic service task timer */
	del_timer_sync(&interface->service_timer);

	/* capture stats one last time before stopping interface */
	fm10k_update_stats(interface);

	/* Disable DMA engine for Tx/Rx */
	hw->mac.ops.stop_hw(hw);

	/* free any buffers still on the rings */
	fm10k_clean_all_tx_rings(interface);
}
1337
/**
 * fm10k_sw_init - Initialize general software structures
 * @interface: host interface private structure to initialize
 * @ent: PCI device ID table entry, used to look up the MAC info table
 *
 * fm10k_sw_init initializes the interface private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 *
 * Returns 0 on success, a negative error code if hardware init fails or
 * the resulting MAC address is invalid.
 **/
static int fm10k_sw_init(struct fm10k_intfc *interface,
			 const struct pci_device_id *ent)
{
	/* NOTE(review): fixed RSS seed makes flow-to-queue placement
	 * predictable across all installs -- consider a random key
	 */
	static const u32 seed[FM10K_RSSRK_SIZE] = { 0xda565a6d, 0xc20e5b25,
						    0x3d256741, 0xb08fa343,
						    0xcb2bcad0, 0xb4307bae,
						    0xa32dcb77, 0x0cf23080,
						    0x3bb7426a, 0xfa01acbe };
	const struct fm10k_info *fi = fm10k_info_tbl[ent->driver_data];
	struct fm10k_hw *hw = &interface->hw;
	struct pci_dev *pdev = interface->pdev;
	struct net_device *netdev = interface->netdev;
	unsigned int rss;
	int err;

	/* initialize back pointer */
	hw->back = interface;
	hw->hw_addr = interface->uc_addr;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Setup hw api */
	memcpy(&hw->mac.ops, fi->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = fi->mac;

	/* Set common capability flags and settings */
	rss = min_t(int, FM10K_MAX_RSS_INDICES, num_online_cpus());
	interface->ring_feature[RING_F_RSS].limit = rss;
	fi->get_invariants(hw);

	/* pick up the PCIe bus settings for reporting later */
	if (hw->mac.ops.get_bus_info)
		hw->mac.ops.get_bus_info(hw);

	/* limit the usable DMA range */
	if (hw->mac.ops.set_dma_mask)
		hw->mac.ops.set_dma_mask(hw, dma_get_mask(&pdev->dev));

	/* update netdev with DMA restrictions */
	if (dma_get_mask(&pdev->dev) > DMA_BIT_MASK(32)) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	/* delay any future reset requests */
	interface->last_reset = jiffies + (10 * HZ);

	/* reset and initialize the hardware so it is in a known state */
	err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw);
	if (err) {
		dev_err(&pdev->dev, "init_hw failed: %d\n", err);
		return err;
	}

	/* initialize hardware statistics */
	hw->mac.ops.update_hw_stats(hw, &interface->stats);

	/* Start with random Ethernet address */
	eth_random_addr(hw->mac.addr);

	/* Initialize MAC address from hardware */
	err = hw->mac.ops.read_mac_addr(hw);
	if (err) {
		dev_warn(&pdev->dev,
			 "Failed to obtain MAC address defaulting to random\n");
		/* tag address assignment as random */
		netdev->addr_assign_type |= NET_ADDR_RANDOM;
	}

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	/* reject devices that report an unusable MAC address */
	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		return -EIO;
	}

	/* Only the PF can support VXLAN and NVGRE offloads */
	if (hw->mac.type != fm10k_mac_pf) {
		netdev->hw_enc_features = 0;
		netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
	}

	/* Initialize service timer and service task */
	set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
	setup_timer(&interface->service_timer, &fm10k_service_timer,
		    (unsigned long)interface);
	INIT_WORK(&interface->service_task, fm10k_service_task);

	/* set default ring sizes */
	interface->tx_ring_count = FM10K_DEFAULT_TXD;
	interface->rx_ring_count = FM10K_DEFAULT_RXD;

	/* set default interrupt moderation */
	interface->tx_itr = FM10K_ITR_10K;
	interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_ITR_20K;

	/* initialize vxlan_port list */
	INIT_LIST_HEAD(&interface->vxlan_port);

	/* initialize RSS key */
	memcpy(interface->rssrk, seed, sizeof(seed));

	/* Start off interface as being down */
	set_bit(__FM10K_DOWN, &interface->state);

	return 0;
}
1460
1461static void fm10k_slot_warn(struct fm10k_intfc *interface)
1462{
1463 struct device *dev = &interface->pdev->dev;
1464 struct fm10k_hw *hw = &interface->hw;
1465
1466 if (hw->mac.ops.is_slot_appropriate(hw))
1467 return;
1468
1469 dev_warn(dev,
1470 "For optimal performance, a %s %s slot is recommended.\n",
1471 (hw->bus_caps.width == fm10k_bus_width_pcie_x1 ? "x1" :
1472 hw->bus_caps.width == fm10k_bus_width_pcie_x4 ? "x4" :
1473 "x8"),
1474 (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s" :
1475 hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s" :
1476 "8.0GT/s"));
1477 dev_warn(dev,
1478 "A slot with more lanes and/or higher speed is suggested.\n");
1479}
1480
Alexander Duyckb3890e32014-09-20 19:46:05 -04001481/**
1482 * fm10k_probe - Device Initialization Routine
1483 * @pdev: PCI device information struct
1484 * @ent: entry in fm10k_pci_tbl
1485 *
1486 * Returns 0 on success, negative on failure
1487 *
1488 * fm10k_probe initializes an interface identified by a pci_dev structure.
1489 * The OS initialization, configuring of the interface private structure,
1490 * and a hardware reset occur.
1491 **/
1492static int fm10k_probe(struct pci_dev *pdev,
1493 const struct pci_device_id *ent)
1494{
Alexander Duyck0e7b3642014-09-20 19:48:10 -04001495 struct net_device *netdev;
1496 struct fm10k_intfc *interface;
1497 struct fm10k_hw *hw;
Alexander Duyckb3890e32014-09-20 19:46:05 -04001498 int err;
1499 u64 dma_mask;
1500
1501 err = pci_enable_device_mem(pdev);
1502 if (err)
1503 return err;
1504
1505 /* By default fm10k only supports a 48 bit DMA mask */
1506 dma_mask = DMA_BIT_MASK(48) | dma_get_required_mask(&pdev->dev);
1507
1508 if ((dma_mask <= DMA_BIT_MASK(32)) ||
1509 dma_set_mask_and_coherent(&pdev->dev, dma_mask)) {
1510 dma_mask &= DMA_BIT_MASK(32);
1511
1512 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1513 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1514 if (err) {
1515 err = dma_set_coherent_mask(&pdev->dev,
1516 DMA_BIT_MASK(32));
1517 if (err) {
1518 dev_err(&pdev->dev,
1519 "No usable DMA configuration, aborting\n");
1520 goto err_dma;
1521 }
1522 }
1523 }
1524
1525 err = pci_request_selected_regions(pdev,
1526 pci_select_bars(pdev,
1527 IORESOURCE_MEM),
1528 fm10k_driver_name);
1529 if (err) {
1530 dev_err(&pdev->dev,
1531 "pci_request_selected_regions failed 0x%x\n", err);
1532 goto err_pci_reg;
1533 }
1534
1535 pci_set_master(pdev);
1536 pci_save_state(pdev);
1537
Alexander Duyck0e7b3642014-09-20 19:48:10 -04001538 netdev = fm10k_alloc_netdev();
1539 if (!netdev) {
1540 err = -ENOMEM;
1541 goto err_alloc_netdev;
1542 }
1543
1544 SET_NETDEV_DEV(netdev, &pdev->dev);
1545
1546 interface = netdev_priv(netdev);
1547 pci_set_drvdata(pdev, interface);
1548
1549 interface->netdev = netdev;
1550 interface->pdev = pdev;
1551 hw = &interface->hw;
1552
1553 interface->uc_addr = ioremap(pci_resource_start(pdev, 0),
1554 FM10K_UC_ADDR_SIZE);
1555 if (!interface->uc_addr) {
1556 err = -EIO;
1557 goto err_ioremap;
1558 }
1559
1560 err = fm10k_sw_init(interface, ent);
1561 if (err)
1562 goto err_sw_init;
1563
Alexander Duyck18283ca2014-09-20 19:48:51 -04001564 err = fm10k_init_queueing_scheme(interface);
1565 if (err)
1566 goto err_sw_init;
1567
1568 err = fm10k_mbx_request_irq(interface);
1569 if (err)
1570 goto err_mbx_interrupt;
1571
Alexander Duyck0e7b3642014-09-20 19:48:10 -04001572 /* final check of hardware state before registering the interface */
1573 err = fm10k_hw_ready(interface);
1574 if (err)
1575 goto err_register;
1576
1577 err = register_netdev(netdev);
1578 if (err)
1579 goto err_register;
1580
1581 /* carrier off reporting is important to ethtool even BEFORE open */
1582 netif_carrier_off(netdev);
1583
1584 /* stop all the transmit queues from transmitting until link is up */
1585 netif_tx_stop_all_queues(netdev);
1586
1587 /* print bus type/speed/width info */
1588 dev_info(&pdev->dev, "(PCI Express:%s Width: %s Payload: %s)\n",
1589 (hw->bus.speed == fm10k_bus_speed_8000 ? "8.0GT/s" :
1590 hw->bus.speed == fm10k_bus_speed_5000 ? "5.0GT/s" :
1591 hw->bus.speed == fm10k_bus_speed_2500 ? "2.5GT/s" :
1592 "Unknown"),
1593 (hw->bus.width == fm10k_bus_width_pcie_x8 ? "x8" :
1594 hw->bus.width == fm10k_bus_width_pcie_x4 ? "x4" :
1595 hw->bus.width == fm10k_bus_width_pcie_x1 ? "x1" :
1596 "Unknown"),
1597 (hw->bus.payload == fm10k_bus_payload_128 ? "128B" :
1598 hw->bus.payload == fm10k_bus_payload_256 ? "256B" :
1599 hw->bus.payload == fm10k_bus_payload_512 ? "512B" :
1600 "Unknown"));
1601
1602 /* print warning for non-optimal configurations */
1603 fm10k_slot_warn(interface);
1604
Alexander Duyckb7d85142014-09-20 19:49:25 -04001605 /* clear the service task disable bit to allow service task to start */
1606 clear_bit(__FM10K_SERVICE_DISABLE, &interface->state);
1607
Alexander Duyckb3890e32014-09-20 19:46:05 -04001608 return 0;
1609
Alexander Duyck0e7b3642014-09-20 19:48:10 -04001610err_register:
Alexander Duyck18283ca2014-09-20 19:48:51 -04001611 fm10k_mbx_free_irq(interface);
1612err_mbx_interrupt:
1613 fm10k_clear_queueing_scheme(interface);
Alexander Duyck0e7b3642014-09-20 19:48:10 -04001614err_sw_init:
1615 iounmap(interface->uc_addr);
1616err_ioremap:
1617 free_netdev(netdev);
1618err_alloc_netdev:
1619 pci_release_selected_regions(pdev,
1620 pci_select_bars(pdev, IORESOURCE_MEM));
Alexander Duyckb3890e32014-09-20 19:46:05 -04001621err_pci_reg:
1622err_dma:
1623 pci_disable_device(pdev);
1624 return err;
1625}
1626
/**
 * fm10k_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * fm10k_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. The could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void fm10k_remove(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct net_device *netdev = interface->netdev;

	/* stop the service task before tearing anything down */
	set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
	cancel_work_sync(&interface->service_task);

	/* free netdev, this may bounce the interrupts due to setup_tc */
	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	/* disable mailbox interrupt */
	fm10k_mbx_free_irq(interface);

	/* free interrupts */
	fm10k_clear_queueing_scheme(interface);

	/* unmap BAR 0 registers */
	iounmap(interface->uc_addr);

	free_netdev(netdev);

	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	pci_disable_device(pdev);
}
1663
/* PCI driver registration structure for the fm10k host interface */
static struct pci_driver fm10k_driver = {
	.name			= fm10k_driver_name,
	.id_table		= fm10k_pci_tbl,
	.probe			= fm10k_probe,
	.remove			= fm10k_remove,
};
1670
/**
 * fm10k_register_pci_driver - register driver interface
 *
 * This function is called on module load in order to register the driver.
 **/
int fm10k_register_pci_driver(void)
{
	return pci_register_driver(&fm10k_driver);
}
1680
/**
 * fm10k_unregister_pci_driver - unregister driver interface
 *
 * This function is called on module unload in order to remove the driver.
 **/
void fm10k_unregister_pci_driver(void)
{
	pci_unregister_driver(&fm10k_driver);
}