blob: 229c7e491251bafbe0a886a6d4db05bb16125cb4 [file] [log] [blame]
Alexander Duyckb3890e32014-09-20 19:46:05 -04001/* Intel Ethernet Switch Host Interface Driver
2 * Copyright(c) 2013 - 2014 Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * The full GNU General Public License is included in this distribution in
14 * the file called "COPYING".
15 *
16 * Contact Information:
17 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
18 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
19 */
20
21#include <linux/module.h>
22
23#include "fm10k.h"
24
/* MAC-type specific operations, indexed by the driver_data value taken
 * from the matching fm10k_pci_tbl entry (see fm10k_device_pf below)
 */
static const struct fm10k_info *fm10k_info_tbl[] = {
	[fm10k_device_pf] = &fm10k_pf_info,
};
28
/**
 * fm10k_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 *
 * The private data carries the fm10k_info_tbl index for the device.
 */
static const struct pci_device_id fm10k_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, FM10K_DEV_ID_PF), fm10k_device_pf },
	/* required last entry */
	{ 0, }
};
/* expose the ID table so udev/modprobe can auto-load this module */
MODULE_DEVICE_TABLE(pci, fm10k_pci_tbl);
44
Alexander Duyck04a5aef2014-09-20 19:46:45 -040045u16 fm10k_read_pci_cfg_word(struct fm10k_hw *hw, u32 reg)
46{
47 struct fm10k_intfc *interface = hw->back;
48 u16 value = 0;
49
50 if (FM10K_REMOVED(hw->hw_addr))
51 return ~value;
52
53 pci_read_config_word(interface->pdev, reg, &value);
54 if (value == 0xFFFF)
55 fm10k_write_flush(hw);
56
57 return value;
58}
59
/* Read a 32-bit device register, detecting surprise removal.  A snapshot
 * of hw_addr is taken with ACCESS_ONCE so a concurrent writer clearing
 * hw->hw_addr cannot change the pointer between the check and the read.
 */
u32 fm10k_read_reg(struct fm10k_hw *hw, int reg)
{
	u32 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
	u32 value = 0;

	if (FM10K_REMOVED(hw_addr))
		return ~value;

	value = readl(&hw_addr[reg]);
	/* all-ones from a register may be a valid value; confirm removal
	 * by reading register 0 (unless that is what we just read)
	 */
	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
		struct fm10k_intfc *interface = hw->back;
		struct net_device *netdev = interface->netdev;

		/* mark the mapping dead and detach so no further I/O is
		 * attempted against the missing device
		 */
		hw->hw_addr = NULL;
		netif_device_detach(netdev);
		netdev_err(netdev, "PCIe link lost, device now detached\n");
	}

	return value;
}
80
Alexander Duyck0e7b3642014-09-20 19:48:10 -040081static int fm10k_hw_ready(struct fm10k_intfc *interface)
82{
83 struct fm10k_hw *hw = &interface->hw;
84
85 fm10k_write_flush(hw);
86
87 return FM10K_REMOVED(hw->hw_addr) ? -ENODEV : 0;
88}
89
Alexander Duyckb7d85142014-09-20 19:49:25 -040090void fm10k_service_event_schedule(struct fm10k_intfc *interface)
91{
92 if (!test_bit(__FM10K_SERVICE_DISABLE, &interface->state) &&
93 !test_and_set_bit(__FM10K_SERVICE_SCHED, &interface->state))
94 schedule_work(&interface->service_task);
95}
96
/* Mark the service task run as finished so a new one may be scheduled.
 * Must only be called from within a scheduled service task (BUG_ON
 * enforces that the SCHED bit is actually held).
 */
static void fm10k_service_event_complete(struct fm10k_intfc *interface)
{
	BUG_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__FM10K_SERVICE_SCHED, &interface->state);
}
105
106/**
107 * fm10k_service_timer - Timer Call-back
108 * @data: pointer to interface cast into an unsigned long
109 **/
110static void fm10k_service_timer(unsigned long data)
111{
112 struct fm10k_intfc *interface = (struct fm10k_intfc *)data;
113
114 /* Reset the timer */
115 mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
116
117 fm10k_service_event_schedule(interface);
118}
119
120static void fm10k_detach_subtask(struct fm10k_intfc *interface)
121{
122 struct net_device *netdev = interface->netdev;
123
124 /* do nothing if device is still present or hw_addr is set */
125 if (netif_device_present(netdev) || interface->hw.hw_addr)
126 return;
127
128 rtnl_lock();
129
130 if (netif_running(netdev))
131 dev_close(netdev);
132
133 rtnl_unlock();
134}
135
/* Tear the interface down, reset/re-init the hardware and bring the
 * interface back up.  Runs in process context from the service task;
 * the close/free-irq/reset/request-irq/open order must be preserved.
 */
static void fm10k_reinit(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	WARN_ON(in_interrupt());

	/* put off any impending NetWatchDogTimeout */
	netdev->trans_start = jiffies;

	/* serialize: spin until we own the RESETTING bit */
	while (test_and_set_bit(__FM10K_RESETTING, &interface->state))
		usleep_range(1000, 2000);

	rtnl_lock();

	if (netif_running(netdev))
		fm10k_close(netdev);

	fm10k_mbx_free_irq(interface);

	/* delay any future reset requests */
	interface->last_reset = jiffies + (10 * HZ);

	/* reset and initialize the hardware so it is in a known state;
	 * init_hw is only attempted when reset_hw succeeded
	 */
	err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw);
	if (err)
		dev_err(&interface->pdev->dev, "init_hw failed: %d\n", err);

	/* reassociate interrupts */
	fm10k_mbx_request_irq(interface);

	if (netif_running(netdev))
		fm10k_open(netdev);

	rtnl_unlock();

	clear_bit(__FM10K_RESETTING, &interface->state);
}
175
/* Perform a deferred reset if one has been requested via
 * FM10K_FLAG_RESET_REQUESTED.  The flag is cleared before the reset so
 * a request arriving mid-reset is not lost.
 */
static void fm10k_reset_subtask(struct fm10k_intfc *interface)
{
	if (!(interface->flags & FM10K_FLAG_RESET_REQUESTED))
		return;

	interface->flags &= ~FM10K_FLAG_RESET_REQUESTED;

	netdev_err(interface->netdev, "Reset interface\n");
	/* counted alongside Tx timeouts for statistics */
	interface->tx_timeout_count++;

	fm10k_reinit(interface);
}
188
189/**
190 * fm10k_configure_swpri_map - Configure Receive SWPRI to PC mapping
191 * @interface: board private structure
192 *
193 * Configure the SWPRI to PC mapping for the port.
194 **/
195static void fm10k_configure_swpri_map(struct fm10k_intfc *interface)
196{
197 struct net_device *netdev = interface->netdev;
198 struct fm10k_hw *hw = &interface->hw;
199 int i;
200
201 /* clear flag indicating update is needed */
202 interface->flags &= ~FM10K_FLAG_SWPRI_CONFIG;
203
204 /* these registers are only available on the PF */
205 if (hw->mac.type != fm10k_mac_pf)
206 return;
207
208 /* configure SWPRI to PC map */
209 for (i = 0; i < FM10K_SWPRI_MAX; i++)
210 fm10k_write_reg(hw, FM10K_SWPRI_MAP(i),
211 netdev_get_prio_tc_map(netdev, i));
212}
213
/**
 * fm10k_watchdog_update_host_state - Update the link status based on host.
 * @interface: board private structure
 *
 * Queries host readiness over the mailbox and requests a reset when the
 * query keeps failing past the last_reset deadline.  While a link-down
 * event is pending the host is always reported as not ready.
 **/
static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	s32 err;

	if (test_bit(__FM10K_LINK_DOWN, &interface->state)) {
		interface->host_ready = false;
		/* keep reporting link down until the event time passes */
		if (time_is_after_jiffies(interface->link_down_event))
			return;
		clear_bit(__FM10K_LINK_DOWN, &interface->state);
	}

	if (interface->flags & FM10K_FLAG_SWPRI_CONFIG) {
		/* trylock so the service task never blocks on rtnl; on
		 * failure the flag stays set and we retry next pass
		 */
		if (rtnl_trylock()) {
			fm10k_configure_swpri_map(interface);
			rtnl_unlock();
		}
	}

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

	err = hw->mac.ops.get_host_state(hw, &interface->host_ready);
	if (err && time_is_before_jiffies(interface->last_reset))
		interface->flags |= FM10K_FLAG_RESET_REQUESTED;

	/* free the lock */
	fm10k_mbx_unlock(interface);
}
247
/**
 * fm10k_mbx_subtask - Process upstream and downstream mailboxes
 * @interface: board private structure
 *
 * This function will process both the upstream and downstream mailboxes.
 * It is necessary for us to hold the rtnl_lock while doing this as the
 * mailbox accesses are protected by this lock.
 **/
static void fm10k_mbx_subtask(struct fm10k_intfc *interface)
{
	/* process upstream mailbox and update device state */
	/* NOTE(review): only the upstream direction is handled here today */
	fm10k_watchdog_update_host_state(interface);
}
261
262/**
263 * fm10k_watchdog_host_is_ready - Update netdev status based on host ready
264 * @interface: board private structure
265 **/
266static void fm10k_watchdog_host_is_ready(struct fm10k_intfc *interface)
267{
268 struct net_device *netdev = interface->netdev;
269
270 /* only continue if link state is currently down */
271 if (netif_carrier_ok(netdev))
272 return;
273
274 netif_info(interface, drv, netdev, "NIC Link is up\n");
275
276 netif_carrier_on(netdev);
277 netif_tx_wake_all_queues(netdev);
278}
279
280/**
281 * fm10k_watchdog_host_not_ready - Update netdev status based on host not ready
282 * @interface: board private structure
283 **/
284static void fm10k_watchdog_host_not_ready(struct fm10k_intfc *interface)
285{
286 struct net_device *netdev = interface->netdev;
287
288 /* only continue if link state is currently up */
289 if (!netif_carrier_ok(netdev))
290 return;
291
292 netif_info(interface, drv, netdev, "NIC Link is down\n");
293
294 netif_carrier_off(netdev);
295 netif_tx_stop_all_queues(netdev);
296}
297
298/**
299 * fm10k_update_stats - Update the board statistics counters.
300 * @interface: board private structure
301 **/
302void fm10k_update_stats(struct fm10k_intfc *interface)
303{
304 struct net_device_stats *net_stats = &interface->netdev->stats;
305 struct fm10k_hw *hw = &interface->hw;
306 u64 rx_errors = 0, rx_csum_errors = 0, tx_csum_errors = 0;
307 u64 restart_queue = 0, tx_busy = 0, alloc_failed = 0;
308 u64 rx_bytes_nic = 0, rx_pkts_nic = 0, rx_drops_nic = 0;
309 u64 tx_bytes_nic = 0, tx_pkts_nic = 0;
310 u64 bytes, pkts;
311 int i;
312
313 /* do not allow stats update via service task for next second */
314 interface->next_stats_update = jiffies + HZ;
315
316 /* gather some stats to the interface struct that are per queue */
317 for (bytes = 0, pkts = 0, i = 0; i < interface->num_tx_queues; i++) {
318 struct fm10k_ring *tx_ring = interface->tx_ring[i];
319
320 restart_queue += tx_ring->tx_stats.restart_queue;
321 tx_busy += tx_ring->tx_stats.tx_busy;
322 tx_csum_errors += tx_ring->tx_stats.csum_err;
323 bytes += tx_ring->stats.bytes;
324 pkts += tx_ring->stats.packets;
325 }
326
327 interface->restart_queue = restart_queue;
328 interface->tx_busy = tx_busy;
329 net_stats->tx_bytes = bytes;
330 net_stats->tx_packets = pkts;
331 interface->tx_csum_errors = tx_csum_errors;
332 /* gather some stats to the interface struct that are per queue */
333 for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) {
334 struct fm10k_ring *rx_ring = interface->rx_ring[i];
335
336 bytes += rx_ring->stats.bytes;
337 pkts += rx_ring->stats.packets;
338 alloc_failed += rx_ring->rx_stats.alloc_failed;
339 rx_csum_errors += rx_ring->rx_stats.csum_err;
340 rx_errors += rx_ring->rx_stats.errors;
341 }
342
343 net_stats->rx_bytes = bytes;
344 net_stats->rx_packets = pkts;
345 interface->alloc_failed = alloc_failed;
346 interface->rx_csum_errors = rx_csum_errors;
347 interface->rx_errors = rx_errors;
348
349 hw->mac.ops.update_hw_stats(hw, &interface->stats);
350
351 for (i = 0; i < FM10K_MAX_QUEUES_PF; i++) {
352 struct fm10k_hw_stats_q *q = &interface->stats.q[i];
353
354 tx_bytes_nic += q->tx_bytes.count;
355 tx_pkts_nic += q->tx_packets.count;
356 rx_bytes_nic += q->rx_bytes.count;
357 rx_pkts_nic += q->rx_packets.count;
358 rx_drops_nic += q->rx_drops.count;
359 }
360
361 interface->tx_bytes_nic = tx_bytes_nic;
362 interface->tx_packets_nic = tx_pkts_nic;
363 interface->rx_bytes_nic = rx_bytes_nic;
364 interface->rx_packets_nic = rx_pkts_nic;
365 interface->rx_drops_nic = rx_drops_nic;
366
367 /* Fill out the OS statistics structure */
368 net_stats->rx_errors = interface->stats.xec.count;
369 net_stats->rx_dropped = interface->stats.nodesc_drop.count;
370}
371
372/**
373 * fm10k_watchdog_flush_tx - flush queues on host not ready
374 * @interface - pointer to the device interface structure
375 **/
376static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface)
377{
378 int some_tx_pending = 0;
379 int i;
380
381 /* nothing to do if carrier is up */
382 if (netif_carrier_ok(interface->netdev))
383 return;
384
385 for (i = 0; i < interface->num_tx_queues; i++) {
386 struct fm10k_ring *tx_ring = interface->tx_ring[i];
387
388 if (tx_ring->next_to_use != tx_ring->next_to_clean) {
389 some_tx_pending = 1;
390 break;
391 }
392 }
393
394 /* We've lost link, so the controller stops DMA, but we've got
395 * queued Tx work that's never going to get done, so reset
396 * controller to flush Tx.
397 */
398 if (some_tx_pending)
399 interface->flags |= FM10K_FLAG_RESET_REQUESTED;
400}
401
402/**
403 * fm10k_watchdog_subtask - check and bring link up
404 * @interface - pointer to the device interface structure
405 **/
406static void fm10k_watchdog_subtask(struct fm10k_intfc *interface)
407{
408 /* if interface is down do nothing */
409 if (test_bit(__FM10K_DOWN, &interface->state) ||
410 test_bit(__FM10K_RESETTING, &interface->state))
411 return;
412
413 if (interface->host_ready)
414 fm10k_watchdog_host_is_ready(interface);
415 else
416 fm10k_watchdog_host_not_ready(interface);
417
418 /* update stats only once every second */
419 if (time_is_before_jiffies(interface->next_stats_update))
420 fm10k_update_stats(interface);
421
422 /* flush any uncompleted work */
423 fm10k_watchdog_flush_tx(interface);
424}
425
426/**
427 * fm10k_check_hang_subtask - check for hung queues and dropped interrupts
428 * @interface - pointer to the device interface structure
429 *
430 * This function serves two purposes. First it strobes the interrupt lines
431 * in order to make certain interrupts are occurring. Secondly it sets the
432 * bits needed to check for TX hangs. As a result we should immediately
433 * determine if a hang has occurred.
434 */
435static void fm10k_check_hang_subtask(struct fm10k_intfc *interface)
436{
437 int i;
438
439 /* If we're down or resetting, just bail */
440 if (test_bit(__FM10K_DOWN, &interface->state) ||
441 test_bit(__FM10K_RESETTING, &interface->state))
442 return;
443
444 /* rate limit tx hang checks to only once every 2 seconds */
445 if (time_is_after_eq_jiffies(interface->next_tx_hang_check))
446 return;
447 interface->next_tx_hang_check = jiffies + (2 * HZ);
448
449 if (netif_carrier_ok(interface->netdev)) {
450 /* Force detection of hung controller */
451 for (i = 0; i < interface->num_tx_queues; i++)
452 set_check_for_tx_hang(interface->tx_ring[i]);
453
454 /* Rearm all in-use q_vectors for immediate firing */
455 for (i = 0; i < interface->num_q_vectors; i++) {
456 struct fm10k_q_vector *qv = interface->q_vector[i];
457
458 if (!qv->tx.count && !qv->rx.count)
459 continue;
460 writel(FM10K_ITR_ENABLE | FM10K_ITR_PENDING2, qv->itr);
461 }
462 }
463}
464
/**
 * fm10k_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 *
 * Runs the individual service subtasks in a fixed order and finally
 * clears the SERVICE_SCHED bit so the task can be scheduled again.
 **/
static void fm10k_service_task(struct work_struct *work)
{
	struct fm10k_intfc *interface;

	interface = container_of(work, struct fm10k_intfc, service_task);

	/* tasks always capable of running, but must be rtnl protected */
	fm10k_mbx_subtask(interface);
	fm10k_detach_subtask(interface);
	fm10k_reset_subtask(interface);

	/* tasks only run when interface is up */
	fm10k_watchdog_subtask(interface);
	fm10k_check_hang_subtask(interface);

	/* release lock on service events to allow scheduling next event */
	fm10k_service_event_complete(interface);
}
487
/**
 * fm10k_configure_tx_ring - Configure Tx ring after Reset
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.  The queue is
 * disabled first, the base/length and head/tail registers programmed,
 * the interrupt mapping set up, and only then is the queue re-enabled.
 **/
static void fm10k_configure_tx_ring(struct fm10k_intfc *interface,
				    struct fm10k_ring *ring)
{
	struct fm10k_hw *hw = &interface->hw;
	u64 tdba = ring->dma;
	u32 size = ring->count * sizeof(struct fm10k_tx_desc);
	u32 txint = FM10K_INT_MAP_DISABLE;
	u32 txdctl = FM10K_TXDCTL_ENABLE | (1 << FM10K_TXDCTL_MAX_TIME_SHIFT);
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), 0);
	fm10k_write_flush(hw);

	/* possible poll here to verify ring resources have been cleaned */

	/* set location and size for descriptor ring */
	fm10k_write_reg(hw, FM10K_TDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	fm10k_write_reg(hw, FM10K_TDBAH(reg_idx), tdba >> 32);
	fm10k_write_reg(hw, FM10K_TDLEN(reg_idx), size);

	/* reset head and tail pointers */
	fm10k_write_reg(hw, FM10K_TDH(reg_idx), 0);
	fm10k_write_reg(hw, FM10K_TDT(reg_idx), 0);

	/* store tail pointer */
	ring->tail = &interface->uc_addr[FM10K_TDT(reg_idx)];

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* Map interrupt */
	if (ring->q_vector) {
		txint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
		txint |= FM10K_INT_MAP_TIMER0;
	}

	fm10k_write_reg(hw, FM10K_TXINT(reg_idx), txint);

	/* enable use of FTAG bit in Tx descriptor, register is RO for VF */
	fm10k_write_reg(hw, FM10K_PFVTCTL(reg_idx),
			FM10K_PFVTCTL_FTAG_DESC_ENABLE);

	/* enable queue */
	fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), txdctl);
}
542
543/**
544 * fm10k_enable_tx_ring - Verify Tx ring is enabled after configuration
545 * @interface: board private structure
546 * @ring: structure containing ring specific data
547 *
548 * Verify the Tx descriptor ring is ready for transmit.
549 **/
550static void fm10k_enable_tx_ring(struct fm10k_intfc *interface,
551 struct fm10k_ring *ring)
552{
553 struct fm10k_hw *hw = &interface->hw;
554 int wait_loop = 10;
555 u32 txdctl;
556 u8 reg_idx = ring->reg_idx;
557
558 /* if we are already enabled just exit */
559 if (fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx)) & FM10K_TXDCTL_ENABLE)
560 return;
561
562 /* poll to verify queue is enabled */
563 do {
564 usleep_range(1000, 2000);
565 txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx));
566 } while (!(txdctl & FM10K_TXDCTL_ENABLE) && --wait_loop);
567 if (!wait_loop)
568 netif_err(interface, drv, interface->netdev,
569 "Could not enable Tx Queue %d\n", reg_idx);
570}
571
572/**
573 * fm10k_configure_tx - Configure Transmit Unit after Reset
574 * @interface: board private structure
575 *
576 * Configure the Tx unit of the MAC after a reset.
577 **/
578static void fm10k_configure_tx(struct fm10k_intfc *interface)
579{
580 int i;
581
582 /* Setup the HW Tx Head and Tail descriptor pointers */
583 for (i = 0; i < interface->num_tx_queues; i++)
584 fm10k_configure_tx_ring(interface, interface->tx_ring[i]);
585
586 /* poll here to verify that Tx rings are now enabled */
587 for (i = 0; i < interface->num_tx_queues; i++)
588 fm10k_enable_tx_ring(interface, interface->tx_ring[i]);
589}
590
/**
 * fm10k_configure_rx_ring - Configure Rx ring after Reset
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Rx descriptor ring after a reset: disable the queue,
 * program base/length, head/tail, buffer sizing, drop-on-empty policy
 * and interrupt mapping, re-enable the queue and refill its buffers.
 **/
static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
				    struct fm10k_ring *ring)
{
	u64 rdba = ring->dma;
	struct fm10k_hw *hw = &interface->hw;
	u32 size = ring->count * sizeof(union fm10k_rx_desc);
	u32 rxqctl = FM10K_RXQCTL_ENABLE | FM10K_RXQCTL_PF;
	u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	u32 srrctl = FM10K_SRRCTL_BUFFER_CHAINING_EN;
	u32 rxint = FM10K_INT_MAP_DISABLE;
	u8 rx_pause = interface->rx_pause;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), 0);
	fm10k_write_flush(hw);

	/* possible poll here to verify ring resources have been cleaned */

	/* set location and size for descriptor ring */
	fm10k_write_reg(hw, FM10K_RDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	fm10k_write_reg(hw, FM10K_RDBAH(reg_idx), rdba >> 32);
	fm10k_write_reg(hw, FM10K_RDLEN(reg_idx), size);

	/* reset head and tail pointers */
	fm10k_write_reg(hw, FM10K_RDH(reg_idx), 0);
	fm10k_write_reg(hw, FM10K_RDT(reg_idx), 0);

	/* store tail pointer */
	ring->tail = &interface->uc_addr[FM10K_RDT(reg_idx)];

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->next_to_alloc = 0;

	/* Configure the Rx buffer size for one buff without split */
	srrctl |= FM10K_RX_BUFSZ >> FM10K_SRRCTL_BSIZEPKT_SHIFT;

	/* Configure the Rx ring to suppress loopback packets */
	srrctl |= FM10K_SRRCTL_LOOPBACK_SUPPRESS;
	fm10k_write_reg(hw, FM10K_SRRCTL(reg_idx), srrctl);

	/* Enable drop on empty: drop only for packet classes where pause
	 * (PFC when DCB is built in) is not enabled
	 */
#if defined(HAVE_DCBNL_IEEE) && defined(CONFIG_DCB)
	if (interface->pfc_en)
		rx_pause = interface->pfc_en;
#endif
	if (!(rx_pause & (1 << ring->qos_pc)))
		rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

	fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);

	/* assign default VLAN to queue */
	ring->vid = hw->mac.default_vid;

	/* Map interrupt */
	if (ring->q_vector) {
		rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
		rxint |= FM10K_INT_MAP_TIMER1;
	}

	fm10k_write_reg(hw, FM10K_RXINT(reg_idx), rxint);

	/* enable queue */
	fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl);

	/* place buffers on ring for receive data */
	fm10k_alloc_rx_buffers(ring, fm10k_desc_unused(ring));
}
668
/**
 * fm10k_update_rx_drop_en - Configures the drop enable bits for Rx rings
 * @interface: board private structure
 *
 * Configure the drop enable bits for the Rx rings: drop-on-empty is set
 * only for packet classes without pause (PFC when DCB is built in).
 **/
void fm10k_update_rx_drop_en(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	u8 rx_pause = interface->rx_pause;
	int i;

#if defined(HAVE_DCBNL_IEEE) && defined(CONFIG_DCB)
	/* PFC configuration overrides the global pause setting */
	if (interface->pfc_en)
		rx_pause = interface->pfc_en;

#endif
	for (i = 0; i < interface->num_rx_queues; i++) {
		struct fm10k_ring *ring = interface->rx_ring[i];
		u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
		u8 reg_idx = ring->reg_idx;

		if (!(rx_pause & (1 << ring->qos_pc)))
			rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

		fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);
	}
}
697
/**
 * fm10k_configure_dglort - Configure Receive DGLORT after reset
 * @interface: board private structure
 *
 * Configure the DGLORT description and RSS tables: hash seeds, RETA,
 * hash field selection, and up to three DGLORT maps (default RSS/DCB,
 * optional per-queue test map, and the interface's RSS/DCB map).
 **/
static void fm10k_configure_dglort(struct fm10k_intfc *interface)
{
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int i;
	u32 mrqc;

	/* Fill out hash function seeds */
	for (i = 0; i < FM10K_RSSRK_SIZE; i++)
		fm10k_write_reg(hw, FM10K_RSSRK(0, i), interface->rssrk[i]);

	/* Write RETA table to hardware */
	for (i = 0; i < FM10K_RETA_SIZE; i++)
		fm10k_write_reg(hw, FM10K_RETA(0, i), interface->reta[i]);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = FM10K_MRQC_IPV4 |
	       FM10K_MRQC_TCP_IPV4 |
	       FM10K_MRQC_IPV6 |
	       FM10K_MRQC_TCP_IPV6;

	/* UDP hashing is opt-in via interface flags */
	if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP)
		mrqc |= FM10K_MRQC_UDP_IPV4;
	if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP)
		mrqc |= FM10K_MRQC_UDP_IPV6;

	fm10k_write_reg(hw, FM10K_MRQC(0), mrqc);

	/* configure default DGLORT mapping for RSS/DCB */
	dglort.inner_rss = 1;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign GLORT per queue for queue mapped testing */
	if (interface->glort_count > 64) {
		memset(&dglort, 0, sizeof(dglort));
		dglort.inner_rss = 1;
		dglort.glort = interface->glort + 64;
		dglort.idx = fm10k_dglort_pf_queue;
		dglort.queue_l = fls(interface->num_rx_queues - 1);
		hw->mac.ops.configure_dglort_map(hw, &dglort);
	}

	/* assign glort value for RSS/DCB specific to this interface */
	memset(&dglort, 0, sizeof(dglort));
	dglort.inner_rss = 1;
	dglort.glort = interface->glort;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
	/* configure DGLORT mapping for RSS/DCB */
	dglort.idx = fm10k_dglort_pf_rss;
	hw->mac.ops.configure_dglort_map(hw, &dglort);
}
760
761/**
762 * fm10k_configure_rx - Configure Receive Unit after Reset
763 * @interface: board private structure
764 *
765 * Configure the Rx unit of the MAC after a reset.
766 **/
767static void fm10k_configure_rx(struct fm10k_intfc *interface)
768{
769 int i;
770
771 /* Configure SWPRI to PC map */
772 fm10k_configure_swpri_map(interface);
773
774 /* Configure RSS and DGLORT map */
775 fm10k_configure_dglort(interface);
776
777 /* Setup the HW Rx Head and Tail descriptor pointers */
778 for (i = 0; i < interface->num_rx_queues; i++)
779 fm10k_configure_rx_ring(interface, interface->rx_ring[i]);
780
781 /* possible poll here to verify that Rx rings are now enabled */
782}
783
Alexander Duyck18283ca2014-09-20 19:48:51 -0400784static void fm10k_napi_enable_all(struct fm10k_intfc *interface)
785{
786 struct fm10k_q_vector *q_vector;
787 int q_idx;
788
789 for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) {
790 q_vector = interface->q_vector[q_idx];
791 napi_enable(&q_vector->napi);
792 }
793}
794
795static irqreturn_t fm10k_msix_clean_rings(int irq, void *data)
796{
797 struct fm10k_q_vector *q_vector = data;
798
799 if (q_vector->rx.count || q_vector->tx.count)
800 napi_schedule(&q_vector->napi);
801
802 return IRQ_HANDLED;
803}
804
/* expands to a case label mapping a numeric fault code to its name */
#define FM10K_ERR_MSG(type) case (type): error = #type; break
/* Log a decoded hardware fault: fault class, sub-type name, faulting
 * address/specinfo and the PCI function that caused it.
 */
static void fm10k_print_fault(struct fm10k_intfc *interface, int type,
			      struct fm10k_fault *fault)
{
	struct pci_dev *pdev = interface->pdev;
	char *error;

	/* each class has its own sub-type namespace; every switch has an
	 * "Unknown" default so error is always assigned
	 */
	switch (type) {
	case FM10K_PCA_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown PCA error";
			break;
		FM10K_ERR_MSG(PCA_NO_FAULT);
		FM10K_ERR_MSG(PCA_UNMAPPED_ADDR);
		FM10K_ERR_MSG(PCA_BAD_QACCESS_PF);
		FM10K_ERR_MSG(PCA_BAD_QACCESS_VF);
		FM10K_ERR_MSG(PCA_MALICIOUS_REQ);
		FM10K_ERR_MSG(PCA_POISONED_TLP);
		FM10K_ERR_MSG(PCA_TLP_ABORT);
		}
		break;
	case FM10K_THI_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown THI error";
			break;
		FM10K_ERR_MSG(THI_NO_FAULT);
		FM10K_ERR_MSG(THI_MAL_DIS_Q_FAULT);
		}
		break;
	case FM10K_FUM_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown FUM error";
			break;
		FM10K_ERR_MSG(FUM_NO_FAULT);
		FM10K_ERR_MSG(FUM_UNMAPPED_ADDR);
		FM10K_ERR_MSG(FUM_BAD_VF_QACCESS);
		FM10K_ERR_MSG(FUM_ADD_DECODE_ERR);
		FM10K_ERR_MSG(FUM_RO_ERROR);
		FM10K_ERR_MSG(FUM_QPRC_CRC_ERROR);
		FM10K_ERR_MSG(FUM_CSR_TIMEOUT);
		FM10K_ERR_MSG(FUM_INVALID_TYPE);
		FM10K_ERR_MSG(FUM_INVALID_LENGTH);
		FM10K_ERR_MSG(FUM_INVALID_BE);
		FM10K_ERR_MSG(FUM_INVALID_ALIGN);
		}
		break;
	default:
		error = "Undocumented fault";
		break;
	}

	dev_warn(&pdev->dev,
		 "%s Address: 0x%llx SpecInfo: 0x%x Func: %02x.%0x\n",
		 error, fault->address, fault->specinfo,
		 PCI_SLOT(fault->func), PCI_FUNC(fault->func));
}
864
/* Walk the fault bits in EICR and log details for each asserted one. */
static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_fault fault = { 0 };
	int type, err;

	/* shift through the masked fault bits; each bit corresponds to a
	 * fault register block FM10K_FAULT_SIZE apart, starting at the
	 * PCA fault block
	 */
	for (eicr &= FM10K_EICR_FAULT_MASK, type = FM10K_PCA_FAULT;
	     eicr;
	     eicr >>= 1, type += FM10K_FAULT_SIZE) {
		/* only check if there is an error reported */
		if (!(eicr & 0x1))
			continue;

		/* retrieve fault info */
		err = hw->mac.ops.get_fault(hw, type, &fault);
		if (err) {
			dev_err(&interface->pdev->dev,
				"error reading fault\n");
			continue;
		}

		fm10k_print_fault(interface, type, &fault);
	}
}
889
/* Re-arm drop-on-empty for PF queues that hit the max hold time.
 * MAXHOLDQ registers hold one bit per queue, 32 queues per register;
 * writing a set bit back acknowledges it.  The scan runs from queue 255
 * down to 0, shifting each register left so bit 31 tracks the current
 * queue — presumably bit 31 of register q/32 maps to the highest queue
 * in that group; TODO confirm against the datasheet.
 */
static void fm10k_reset_drop_on_empty(struct fm10k_intfc *interface, u32 eicr)
{
	struct fm10k_hw *hw = &interface->hw;
	const u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	u32 maxholdq;
	int q;

	/* nothing to do unless the max-hold-time cause fired */
	if (!(eicr & FM10K_EICR_MAXHOLDTIME))
		return;

	/* read and acknowledge the topmost register first */
	maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(7));
	if (maxholdq)
		fm10k_write_reg(hw, FM10K_MAXHOLDQ(7), maxholdq);
	for (q = 255;;) {
		if (maxholdq & (1 << 31)) {
			if (q < FM10K_MAX_QUEUES_PF) {
				/* PF queue: restore write-back delay */
				interface->rx_overrun_pf++;
				fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl);
			} else {
				/* VF queue: only count the overrun */
				interface->rx_overrun_vf++;
			}
		}

		/* shift next queue's bit into position 31; once the
		 * register is exhausted skip ahead to the next group
		 */
		maxholdq *= 2;
		if (!maxholdq)
			q &= ~(32 - 1);

		if (!q)
			break;

		if (q-- % 32)
			continue;

		/* crossing into the next 32-queue group: reload and ack */
		maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(q / 32));
		if (maxholdq)
			fm10k_write_reg(hw, FM10K_MAXHOLDQ(q / 32), maxholdq);
	}
}
928
/* PF mailbox MSI-X handler: acknowledge causes, log faults, reset
 * overrun queues, service the mailbox and kick the service task.
 */
static irqreturn_t fm10k_msix_mbx_pf(int irq, void *data)
{
	struct fm10k_intfc *interface = data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 eicr;

	/* unmask any set bits related to this interrupt */
	eicr = fm10k_read_reg(hw, FM10K_EICR);
	fm10k_write_reg(hw, FM10K_EICR, eicr & (FM10K_EICR_MAILBOX |
						FM10K_EICR_SWITCHREADY |
						FM10K_EICR_SWITCHNOTREADY));

	/* report any faults found to the message log */
	fm10k_report_fault(interface, eicr);

	/* reset any queues disabled due to receiver overrun */
	fm10k_reset_drop_on_empty(interface, eicr);

	/* service mailboxes; trylock because the service task may hold it */
	if (fm10k_mbx_trylock(interface)) {
		mbx->ops.process(hw, mbx);
		fm10k_mbx_unlock(interface);
	}

	/* if switch toggled state we should reset GLORTs */
	if (eicr & FM10K_EICR_SWITCHNOTREADY) {
		/* force link down for at least 4 seconds */
		interface->link_down_event = jiffies + (4 * HZ);
		set_bit(__FM10K_LINK_DOWN, &interface->state);

		/* reset dglort_map back to no config */
		hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
	}

	/* we should validate host state after interrupt event */
	hw->mac.get_host_state = 1;
	fm10k_service_event_schedule(interface);

	/* re-enable mailbox interrupt and indicate 20us delay */
	fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR),
			FM10K_ITR_ENABLE | FM10K_MBX_INT_DELAY);

	return IRQ_HANDLED;
}
974
975void fm10k_mbx_free_irq(struct fm10k_intfc *interface)
976{
977 struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
978 struct fm10k_hw *hw = &interface->hw;
979 int itr_reg;
980
981 /* disconnect the mailbox */
982 hw->mbx.ops.disconnect(hw, &hw->mbx);
983
984 /* disable Mailbox cause */
985 if (hw->mac.type == fm10k_mac_pf) {
986 fm10k_write_reg(hw, FM10K_EIMR,
987 FM10K_EIMR_DISABLE(PCA_FAULT) |
988 FM10K_EIMR_DISABLE(FUM_FAULT) |
989 FM10K_EIMR_DISABLE(MAILBOX) |
990 FM10K_EIMR_DISABLE(SWITCHREADY) |
991 FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
992 FM10K_EIMR_DISABLE(SRAMERROR) |
993 FM10K_EIMR_DISABLE(VFLR) |
994 FM10K_EIMR_DISABLE(MAXHOLDTIME));
995 itr_reg = FM10K_ITR(FM10K_MBX_VECTOR);
996 }
997
998 fm10k_write_reg(hw, itr_reg, FM10K_ITR_MASK_SET);
999
1000 free_irq(entry->vector, interface);
1001}
1002
1003/* generic error handler for mailbox issues */
1004static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results,
1005 struct fm10k_mbx_info *mbx)
1006{
1007 struct fm10k_intfc *interface;
1008 struct pci_dev *pdev;
1009
1010 interface = container_of(hw, struct fm10k_intfc, hw);
1011 pdev = interface->pdev;
1012
1013 dev_err(&pdev->dev, "Unknown message ID %u\n",
1014 **results & FM10K_TLV_ID_MASK);
1015
1016 return 0;
1017}
1018
1019static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results,
1020 struct fm10k_mbx_info *mbx)
1021{
1022 struct fm10k_intfc *interface;
1023 u32 dglort_map = hw->mac.dglort_map;
1024 s32 err;
1025
1026 err = fm10k_msg_lport_map_pf(hw, results, mbx);
1027 if (err)
1028 return err;
1029
1030 interface = container_of(hw, struct fm10k_intfc, hw);
1031
1032 /* we need to reset if port count was just updated */
1033 if (dglort_map != hw->mac.dglort_map)
1034 interface->flags |= FM10K_FLAG_RESET_REQUESTED;
1035
1036 return 0;
1037}
1038
1039static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results,
1040 struct fm10k_mbx_info *mbx)
1041{
1042 struct fm10k_intfc *interface;
1043 u16 glort, pvid;
1044 u32 pvid_update;
1045 s32 err;
1046
1047 err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID],
1048 &pvid_update);
1049 if (err)
1050 return err;
1051
1052 /* extract values from the pvid update */
1053 glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT);
1054 pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID);
1055
1056 /* if glort is not valid return error */
1057 if (!fm10k_glort_valid_pf(hw, glort))
1058 return FM10K_ERR_PARAM;
1059
1060 /* verify VID is valid */
1061 if (pvid >= FM10K_VLAN_TABLE_VID_MAX)
1062 return FM10K_ERR_PARAM;
1063
1064 interface = container_of(hw, struct fm10k_intfc, hw);
1065
1066 /* we need to reset if default VLAN was just updated */
1067 if (pvid != hw->mac.default_vid)
1068 interface->flags |= FM10K_FLAG_RESET_REQUESTED;
1069
1070 hw->mac.default_vid = pvid;
1071
1072 return 0;
1073}
1074
/* PF mailbox message dispatch table; entries are matched in order and any
 * unrecognized message ID falls through to the TLV error handler last.
 */
static const struct fm10k_msg_data pf_mbx_data[] = {
	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_lport_map),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_update_pvid),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
};
1084
1085static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface)
1086{
1087 struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
1088 struct net_device *dev = interface->netdev;
1089 struct fm10k_hw *hw = &interface->hw;
1090 int err;
1091
1092 /* Use timer0 for interrupt moderation on the mailbox */
1093 u32 mbx_itr = FM10K_INT_MAP_TIMER0 | entry->entry;
1094 u32 other_itr = FM10K_INT_MAP_IMMEDIATE | entry->entry;
1095
1096 /* register mailbox handlers */
1097 err = hw->mbx.ops.register_handlers(&hw->mbx, pf_mbx_data);
1098 if (err)
1099 return err;
1100
1101 /* request the IRQ */
1102 err = request_irq(entry->vector, fm10k_msix_mbx_pf, 0,
1103 dev->name, interface);
1104 if (err) {
1105 netif_err(interface, probe, dev,
1106 "request_irq for msix_mbx failed: %d\n", err);
1107 return err;
1108 }
1109
1110 /* Enable interrupts w/ no moderation for "other" interrupts */
1111 fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), other_itr);
1112 fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), other_itr);
1113 fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_SRAM), other_itr);
1114 fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_MaxHoldTime), other_itr);
1115 fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_VFLR), other_itr);
1116
1117 /* Enable interrupts w/ moderation for mailbox */
1118 fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_Mailbox), mbx_itr);
1119
1120 /* Enable individual interrupt causes */
1121 fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
1122 FM10K_EIMR_ENABLE(FUM_FAULT) |
1123 FM10K_EIMR_ENABLE(MAILBOX) |
1124 FM10K_EIMR_ENABLE(SWITCHREADY) |
1125 FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
1126 FM10K_EIMR_ENABLE(SRAMERROR) |
1127 FM10K_EIMR_ENABLE(VFLR) |
1128 FM10K_EIMR_ENABLE(MAXHOLDTIME));
1129
1130 /* enable interrupt */
1131 fm10k_write_reg(hw, FM10K_ITR(entry->entry), FM10K_ITR_ENABLE);
1132
1133 return 0;
1134}
1135
1136int fm10k_mbx_request_irq(struct fm10k_intfc *interface)
1137{
1138 struct fm10k_hw *hw = &interface->hw;
1139 int err;
1140
1141 /* enable Mailbox cause */
1142 err = fm10k_mbx_request_irq_pf(interface);
1143
1144 /* connect mailbox */
1145 if (!err)
1146 err = hw->mbx.ops.connect(hw, &hw->mbx);
1147
1148 return err;
1149}
1150
1151/**
1152 * fm10k_qv_free_irq - release interrupts associated with queue vectors
1153 * @interface: board private structure
1154 *
1155 * Release all interrupts associated with this interface
1156 **/
1157void fm10k_qv_free_irq(struct fm10k_intfc *interface)
1158{
1159 int vector = interface->num_q_vectors;
1160 struct fm10k_hw *hw = &interface->hw;
1161 struct msix_entry *entry;
1162
1163 entry = &interface->msix_entries[NON_Q_VECTORS(hw) + vector];
1164
1165 while (vector) {
1166 struct fm10k_q_vector *q_vector;
1167
1168 vector--;
1169 entry--;
1170 q_vector = interface->q_vector[vector];
1171
1172 if (!q_vector->tx.count && !q_vector->rx.count)
1173 continue;
1174
1175 /* disable interrupts */
1176
1177 writel(FM10K_ITR_MASK_SET, q_vector->itr);
1178
1179 free_irq(entry->vector, q_vector);
1180 }
1181}
1182
1183/**
1184 * fm10k_qv_request_irq - initialize interrupts for queue vectors
1185 * @interface: board private structure
1186 *
1187 * Attempts to configure interrupts using the best available
1188 * capabilities of the hardware and kernel.
1189 **/
1190int fm10k_qv_request_irq(struct fm10k_intfc *interface)
1191{
1192 struct net_device *dev = interface->netdev;
1193 struct fm10k_hw *hw = &interface->hw;
1194 struct msix_entry *entry;
1195 int ri = 0, ti = 0;
1196 int vector, err;
1197
1198 entry = &interface->msix_entries[NON_Q_VECTORS(hw)];
1199
1200 for (vector = 0; vector < interface->num_q_vectors; vector++) {
1201 struct fm10k_q_vector *q_vector = interface->q_vector[vector];
1202
1203 /* name the vector */
1204 if (q_vector->tx.count && q_vector->rx.count) {
1205 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1206 "%s-TxRx-%d", dev->name, ri++);
1207 ti++;
1208 } else if (q_vector->rx.count) {
1209 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1210 "%s-rx-%d", dev->name, ri++);
1211 } else if (q_vector->tx.count) {
1212 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1213 "%s-tx-%d", dev->name, ti++);
1214 } else {
1215 /* skip this unused q_vector */
1216 continue;
1217 }
1218
1219 /* Assign ITR register to q_vector */
1220 q_vector->itr = &interface->uc_addr[FM10K_ITR(entry->entry)];
1221
1222 /* request the IRQ */
1223 err = request_irq(entry->vector, &fm10k_msix_clean_rings, 0,
1224 q_vector->name, q_vector);
1225 if (err) {
1226 netif_err(interface, probe, dev,
1227 "request_irq failed for MSIX interrupt Error: %d\n",
1228 err);
1229 goto err_out;
1230 }
1231
1232 /* Enable q_vector */
1233 writel(FM10K_ITR_ENABLE, q_vector->itr);
1234
1235 entry++;
1236 }
1237
1238 return 0;
1239
1240err_out:
1241 /* wind through the ring freeing all entries and vectors */
1242 while (vector) {
1243 struct fm10k_q_vector *q_vector;
1244
1245 entry--;
1246 vector--;
1247 q_vector = interface->q_vector[vector];
1248
1249 if (!q_vector->tx.count && !q_vector->rx.count)
1250 continue;
1251
1252 /* disable interrupts */
1253
1254 writel(FM10K_ITR_MASK_SET, q_vector->itr);
1255
1256 free_irq(entry->vector, q_vector);
1257 }
1258
1259 return err;
1260}
1261
Alexander Duyck504c5ea2014-09-20 19:48:29 -04001262void fm10k_up(struct fm10k_intfc *interface)
1263{
1264 struct fm10k_hw *hw = &interface->hw;
1265
1266 /* Enable Tx/Rx DMA */
1267 hw->mac.ops.start_hw(hw);
1268
Alexander Duyck3abaae42014-09-20 19:49:43 -04001269 /* configure Tx descriptor rings */
1270 fm10k_configure_tx(interface);
1271
1272 /* configure Rx descriptor rings */
1273 fm10k_configure_rx(interface);
1274
Alexander Duyck504c5ea2014-09-20 19:48:29 -04001275 /* configure interrupts */
1276 hw->mac.ops.update_int_moderator(hw);
1277
1278 /* clear down bit to indicate we are ready to go */
1279 clear_bit(__FM10K_DOWN, &interface->state);
1280
Alexander Duyck18283ca2014-09-20 19:48:51 -04001281 /* enable polling cleanups */
1282 fm10k_napi_enable_all(interface);
1283
Alexander Duyck504c5ea2014-09-20 19:48:29 -04001284 /* re-establish Rx filters */
1285 fm10k_restore_rx_state(interface);
1286
1287 /* enable transmits */
1288 netif_tx_start_all_queues(interface->netdev);
Alexander Duyckb7d85142014-09-20 19:49:25 -04001289
1290 /* kick off the service timer */
1291 mod_timer(&interface->service_timer, jiffies);
Alexander Duyck504c5ea2014-09-20 19:48:29 -04001292}
1293
Alexander Duyck18283ca2014-09-20 19:48:51 -04001294static void fm10k_napi_disable_all(struct fm10k_intfc *interface)
1295{
1296 struct fm10k_q_vector *q_vector;
1297 int q_idx;
1298
1299 for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) {
1300 q_vector = interface->q_vector[q_idx];
1301 napi_disable(&q_vector->napi);
1302 }
1303}
1304
Alexander Duyck504c5ea2014-09-20 19:48:29 -04001305void fm10k_down(struct fm10k_intfc *interface)
1306{
1307 struct net_device *netdev = interface->netdev;
1308 struct fm10k_hw *hw = &interface->hw;
1309
1310 /* signal that we are down to the interrupt handler and service task */
1311 set_bit(__FM10K_DOWN, &interface->state);
1312
1313 /* call carrier off first to avoid false dev_watchdog timeouts */
1314 netif_carrier_off(netdev);
1315
1316 /* disable transmits */
1317 netif_tx_stop_all_queues(netdev);
1318 netif_tx_disable(netdev);
1319
1320 /* reset Rx filters */
1321 fm10k_reset_rx_state(interface);
1322
1323 /* allow 10ms for device to quiesce */
1324 usleep_range(10000, 20000);
1325
Alexander Duyck18283ca2014-09-20 19:48:51 -04001326 /* disable polling routines */
1327 fm10k_napi_disable_all(interface);
1328
Alexander Duyckb7d85142014-09-20 19:49:25 -04001329 del_timer_sync(&interface->service_timer);
1330
1331 /* capture stats one last time before stopping interface */
1332 fm10k_update_stats(interface);
1333
Alexander Duyck504c5ea2014-09-20 19:48:29 -04001334 /* Disable DMA engine for Tx/Rx */
1335 hw->mac.ops.stop_hw(hw);
Alexander Duyck3abaae42014-09-20 19:49:43 -04001336
1337 /* free any buffers still on the rings */
1338 fm10k_clean_all_tx_rings(interface);
Alexander Duyck504c5ea2014-09-20 19:48:29 -04001339}
1340
Alexander Duyck0e7b3642014-09-20 19:48:10 -04001341/**
1342 * fm10k_sw_init - Initialize general software structures
1343 * @interface: host interface private structure to initialize
1344 *
1345 * fm10k_sw_init initializes the interface private data structure.
1346 * Fields are initialized based on PCI device information and
1347 * OS network device settings (MTU size).
1348 **/
static int fm10k_sw_init(struct fm10k_intfc *interface,
			 const struct pci_device_id *ent)
{
	/* fixed seed for the RSS key; copied into interface->rssrk below */
	static const u32 seed[FM10K_RSSRK_SIZE] = { 0xda565a6d, 0xc20e5b25,
						    0x3d256741, 0xb08fa343,
						    0xcb2bcad0, 0xb4307bae,
						    0xa32dcb77, 0x0cf23080,
						    0x3bb7426a, 0xfa01acbe };
	const struct fm10k_info *fi = fm10k_info_tbl[ent->driver_data];
	struct fm10k_hw *hw = &interface->hw;
	struct pci_dev *pdev = interface->pdev;
	struct net_device *netdev = interface->netdev;
	unsigned int rss;
	int err;

	/* initialize back pointer */
	hw->back = interface;
	hw->hw_addr = interface->uc_addr;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Setup hw api */
	memcpy(&hw->mac.ops, fi->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = fi->mac;

	/* Set common capability flags and settings */
	rss = min_t(int, FM10K_MAX_RSS_INDICES, num_online_cpus());
	interface->ring_feature[RING_F_RSS].limit = rss;
	fi->get_invariants(hw);

	/* pick up the PCIe bus settings for reporting later */
	if (hw->mac.ops.get_bus_info)
		hw->mac.ops.get_bus_info(hw);

	/* limit the usable DMA range */
	if (hw->mac.ops.set_dma_mask)
		hw->mac.ops.set_dma_mask(hw, dma_get_mask(&pdev->dev));

	/* update netdev with DMA restrictions */
	if (dma_get_mask(&pdev->dev) > DMA_BIT_MASK(32)) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	/* delay any future reset requests */
	interface->last_reset = jiffies + (10 * HZ);

	/* reset and initialize the hardware so it is in a known state;
	 * reset_hw() is only followed by init_hw() when it succeeds
	 */
	err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw);
	if (err) {
		dev_err(&pdev->dev, "init_hw failed: %d\n", err);
		return err;
	}

	/* initialize hardware statistics */
	hw->mac.ops.update_hw_stats(hw, &interface->stats);

	/* Start with random Ethernet address */
	eth_random_addr(hw->mac.addr);

	/* Initialize MAC address from hardware; on failure the random
	 * address assigned above remains in place
	 */
	err = hw->mac.ops.read_mac_addr(hw);
	if (err) {
		dev_warn(&pdev->dev,
			 "Failed to obtain MAC address defaulting to random\n");
		/* tag address assignment as random */
		netdev->addr_assign_type |= NET_ADDR_RANDOM;
	}

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		return -EIO;
	}

	/* Only the PF can support VXLAN and NVGRE offloads */
	if (hw->mac.type != fm10k_mac_pf) {
		netdev->hw_enc_features = 0;
		netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
	}

	/* Initialize service timer and service task; the service task stays
	 * disabled until probe completes successfully
	 */
	set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
	setup_timer(&interface->service_timer, &fm10k_service_timer,
		    (unsigned long)interface);
	INIT_WORK(&interface->service_task, fm10k_service_task);

	/* set default ring sizes */
	interface->tx_ring_count = FM10K_DEFAULT_TXD;
	interface->rx_ring_count = FM10K_DEFAULT_RXD;

	/* set default interrupt moderation */
	interface->tx_itr = FM10K_ITR_10K;
	interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_ITR_20K;

	/* initialize vxlan_port list */
	INIT_LIST_HEAD(&interface->vxlan_port);

	/* initialize RSS key */
	memcpy(interface->rssrk, seed, sizeof(seed));

	/* Start off interface as being down */
	set_bit(__FM10K_DOWN, &interface->state);

	return 0;
}
1463
1464static void fm10k_slot_warn(struct fm10k_intfc *interface)
1465{
1466 struct device *dev = &interface->pdev->dev;
1467 struct fm10k_hw *hw = &interface->hw;
1468
1469 if (hw->mac.ops.is_slot_appropriate(hw))
1470 return;
1471
1472 dev_warn(dev,
1473 "For optimal performance, a %s %s slot is recommended.\n",
1474 (hw->bus_caps.width == fm10k_bus_width_pcie_x1 ? "x1" :
1475 hw->bus_caps.width == fm10k_bus_width_pcie_x4 ? "x4" :
1476 "x8"),
1477 (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s" :
1478 hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s" :
1479 "8.0GT/s"));
1480 dev_warn(dev,
1481 "A slot with more lanes and/or higher speed is suggested.\n");
1482}
1483
Alexander Duyckb3890e32014-09-20 19:46:05 -04001484/**
1485 * fm10k_probe - Device Initialization Routine
1486 * @pdev: PCI device information struct
1487 * @ent: entry in fm10k_pci_tbl
1488 *
1489 * Returns 0 on success, negative on failure
1490 *
1491 * fm10k_probe initializes an interface identified by a pci_dev structure.
1492 * The OS initialization, configuring of the interface private structure,
1493 * and a hardware reset occur.
1494 **/
1495static int fm10k_probe(struct pci_dev *pdev,
1496 const struct pci_device_id *ent)
1497{
Alexander Duyck0e7b3642014-09-20 19:48:10 -04001498 struct net_device *netdev;
1499 struct fm10k_intfc *interface;
1500 struct fm10k_hw *hw;
Alexander Duyckb3890e32014-09-20 19:46:05 -04001501 int err;
1502 u64 dma_mask;
1503
1504 err = pci_enable_device_mem(pdev);
1505 if (err)
1506 return err;
1507
1508 /* By default fm10k only supports a 48 bit DMA mask */
1509 dma_mask = DMA_BIT_MASK(48) | dma_get_required_mask(&pdev->dev);
1510
1511 if ((dma_mask <= DMA_BIT_MASK(32)) ||
1512 dma_set_mask_and_coherent(&pdev->dev, dma_mask)) {
1513 dma_mask &= DMA_BIT_MASK(32);
1514
1515 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1516 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1517 if (err) {
1518 err = dma_set_coherent_mask(&pdev->dev,
1519 DMA_BIT_MASK(32));
1520 if (err) {
1521 dev_err(&pdev->dev,
1522 "No usable DMA configuration, aborting\n");
1523 goto err_dma;
1524 }
1525 }
1526 }
1527
1528 err = pci_request_selected_regions(pdev,
1529 pci_select_bars(pdev,
1530 IORESOURCE_MEM),
1531 fm10k_driver_name);
1532 if (err) {
1533 dev_err(&pdev->dev,
1534 "pci_request_selected_regions failed 0x%x\n", err);
1535 goto err_pci_reg;
1536 }
1537
1538 pci_set_master(pdev);
1539 pci_save_state(pdev);
1540
Alexander Duyck0e7b3642014-09-20 19:48:10 -04001541 netdev = fm10k_alloc_netdev();
1542 if (!netdev) {
1543 err = -ENOMEM;
1544 goto err_alloc_netdev;
1545 }
1546
1547 SET_NETDEV_DEV(netdev, &pdev->dev);
1548
1549 interface = netdev_priv(netdev);
1550 pci_set_drvdata(pdev, interface);
1551
1552 interface->netdev = netdev;
1553 interface->pdev = pdev;
1554 hw = &interface->hw;
1555
1556 interface->uc_addr = ioremap(pci_resource_start(pdev, 0),
1557 FM10K_UC_ADDR_SIZE);
1558 if (!interface->uc_addr) {
1559 err = -EIO;
1560 goto err_ioremap;
1561 }
1562
1563 err = fm10k_sw_init(interface, ent);
1564 if (err)
1565 goto err_sw_init;
1566
Alexander Duyck18283ca2014-09-20 19:48:51 -04001567 err = fm10k_init_queueing_scheme(interface);
1568 if (err)
1569 goto err_sw_init;
1570
1571 err = fm10k_mbx_request_irq(interface);
1572 if (err)
1573 goto err_mbx_interrupt;
1574
Alexander Duyck0e7b3642014-09-20 19:48:10 -04001575 /* final check of hardware state before registering the interface */
1576 err = fm10k_hw_ready(interface);
1577 if (err)
1578 goto err_register;
1579
1580 err = register_netdev(netdev);
1581 if (err)
1582 goto err_register;
1583
1584 /* carrier off reporting is important to ethtool even BEFORE open */
1585 netif_carrier_off(netdev);
1586
1587 /* stop all the transmit queues from transmitting until link is up */
1588 netif_tx_stop_all_queues(netdev);
1589
1590 /* print bus type/speed/width info */
1591 dev_info(&pdev->dev, "(PCI Express:%s Width: %s Payload: %s)\n",
1592 (hw->bus.speed == fm10k_bus_speed_8000 ? "8.0GT/s" :
1593 hw->bus.speed == fm10k_bus_speed_5000 ? "5.0GT/s" :
1594 hw->bus.speed == fm10k_bus_speed_2500 ? "2.5GT/s" :
1595 "Unknown"),
1596 (hw->bus.width == fm10k_bus_width_pcie_x8 ? "x8" :
1597 hw->bus.width == fm10k_bus_width_pcie_x4 ? "x4" :
1598 hw->bus.width == fm10k_bus_width_pcie_x1 ? "x1" :
1599 "Unknown"),
1600 (hw->bus.payload == fm10k_bus_payload_128 ? "128B" :
1601 hw->bus.payload == fm10k_bus_payload_256 ? "256B" :
1602 hw->bus.payload == fm10k_bus_payload_512 ? "512B" :
1603 "Unknown"));
1604
1605 /* print warning for non-optimal configurations */
1606 fm10k_slot_warn(interface);
1607
Alexander Duyckb7d85142014-09-20 19:49:25 -04001608 /* clear the service task disable bit to allow service task to start */
1609 clear_bit(__FM10K_SERVICE_DISABLE, &interface->state);
1610
Alexander Duyckb3890e32014-09-20 19:46:05 -04001611 return 0;
1612
Alexander Duyck0e7b3642014-09-20 19:48:10 -04001613err_register:
Alexander Duyck18283ca2014-09-20 19:48:51 -04001614 fm10k_mbx_free_irq(interface);
1615err_mbx_interrupt:
1616 fm10k_clear_queueing_scheme(interface);
Alexander Duyck0e7b3642014-09-20 19:48:10 -04001617err_sw_init:
1618 iounmap(interface->uc_addr);
1619err_ioremap:
1620 free_netdev(netdev);
1621err_alloc_netdev:
1622 pci_release_selected_regions(pdev,
1623 pci_select_bars(pdev, IORESOURCE_MEM));
Alexander Duyckb3890e32014-09-20 19:46:05 -04001624err_pci_reg:
1625err_dma:
1626 pci_disable_device(pdev);
1627 return err;
1628}
1629
1630/**
1631 * fm10k_remove - Device Removal Routine
1632 * @pdev: PCI device information struct
1633 *
1634 * fm10k_remove is called by the PCI subsystem to alert the driver
1635 * that it should release a PCI device. The could be caused by a
1636 * Hot-Plug event, or because the driver is going to be removed from
1637 * memory.
1638 **/
static void fm10k_remove(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct net_device *netdev = interface->netdev;

	/* stop the service task before tearing anything down */
	set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
	cancel_work_sync(&interface->service_task);

	/* free netdev, this may bounce the interrupts due to setup_tc */
	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	/* disable mailbox interrupt */
	fm10k_mbx_free_irq(interface);

	/* free interrupts */
	fm10k_clear_queueing_scheme(interface);

	/* release register mapping and PCI resources in reverse order
	 * of acquisition in fm10k_probe()
	 */
	iounmap(interface->uc_addr);

	free_netdev(netdev);

	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	pci_disable_device(pdev);
}
1666
/* PCI driver structure binding the fm10k probe/remove callbacks to the
 * device IDs listed in fm10k_pci_tbl
 */
static struct pci_driver fm10k_driver = {
	.name			= fm10k_driver_name,
	.id_table		= fm10k_pci_tbl,
	.probe			= fm10k_probe,
	.remove			= fm10k_remove,
};
1673
1674/**
1675 * fm10k_register_pci_driver - register driver interface
1676 *
1677 * This funciton is called on module load in order to register the driver.
1678 **/
1679int fm10k_register_pci_driver(void)
1680{
1681 return pci_register_driver(&fm10k_driver);
1682}
1683
1684/**
1685 * fm10k_unregister_pci_driver - unregister driver interface
1686 *
1687 * This funciton is called on module unload in order to remove the driver.
1688 **/
1689void fm10k_unregister_pci_driver(void)
1690{
1691 pci_unregister_driver(&fm10k_driver);
1692}