/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
			      "Intel(R) 10 Gigabit PCI Express Network Driver";
#ifdef IXGBE_FCOE
char ixgbe_default_device_descr[] =
			      "Intel(R) 10 Gigabit Network Connection";
#else
static char ixgbe_default_device_descr[] =
			      "Intel(R) 10 Gigabit Network Connection";
#endif
#define MAJ 3
#define MIN 9
#define BUILD 15
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
	__stringify(BUILD) "-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
				"Copyright (c) 1999-2012 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
	[board_82599] = &ixgbe_82599_info,
	[board_X540] = &ixgbe_X540_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
		 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63");
#endif /* CONFIG_PCI_IOV */

static unsigned int allow_unsupported_sfp;
module_param(allow_unsupported_sfp, uint, 0);
MODULE_PARM_DESC(allow_unsupported_sfp,
		 "Allow unsupported and untested SFP+ modules on 82599-based adapters");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
	if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
	    !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
		schedule_work(&adapter->service_task);
}

static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_clear_bit();
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}

struct ixgbe_reg_info {
	u32 ofs;
	char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

	/* General Registers */
	{IXGBE_CTRL, "CTRL"},
	{IXGBE_STATUS, "STATUS"},
	{IXGBE_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{IXGBE_EICR, "EICR"},

	/* RX Registers */
	{IXGBE_SRRCTL(0), "SRRCTL"},
	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
	{IXGBE_RDLEN(0), "RDLEN"},
	{IXGBE_RDH(0), "RDH"},
	{IXGBE_RDT(0), "RDT"},
	{IXGBE_RXDCTL(0), "RXDCTL"},
	{IXGBE_RDBAL(0), "RDBAL"},
	{IXGBE_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{IXGBE_TDBAL(0), "TDBAL"},
	{IXGBE_TDBAH(0), "TDBAH"},
	{IXGBE_TDLEN(0), "TDLEN"},
	{IXGBE_TDH(0), "TDH"},
	{IXGBE_TDT(0), "TDT"},
	{IXGBE_TXDCTL(0), "TXDCTL"},

	/* List Terminator */
	{}
};


/*
 * ixgbe_regdump - register printout routine
 */
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
	int i = 0, j = 0;
	char rname[16];
	u32 regs[64];

	switch (reginfo->ofs) {
	case IXGBE_SRRCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
		break;
	case IXGBE_DCA_RXCTRL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		break;
	case IXGBE_RDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
		break;
	case IXGBE_RDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
		break;
	case IXGBE_RDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
		break;
	case IXGBE_RXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		break;
	case IXGBE_RDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
		break;
	case IXGBE_RDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
		break;
	case IXGBE_TDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
		break;
	case IXGBE_TDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
		break;
	case IXGBE_TDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
		break;
	case IXGBE_TDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
		break;
	case IXGBE_TDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
		break;
	case IXGBE_TXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name,
			IXGBE_READ_REG(hw, reginfo->ofs));
		return;
	}

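	/* the 64 per-queue values print eight per row, e.g. "SRRCTL[0-7]" */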
	for (i = 0; i < 8; i++) {
		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
		pr_err("%-15s", rname);
		for (j = 0; j < 8; j++)
			pr_cont(" %08x", regs[i*8+j]);
		pr_cont("\n");
	}

}

/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_reg_info *reginfo;
	int n = 0;
	struct ixgbe_ring *tx_ring;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct ixgbe_ring *rx_ring;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer_info;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            "
			"trans_start      last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
	     reginfo->name; reginfo++) {
		ixgbe_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)dma_unmap_addr(tx_buffer, dma),
			dma_unmap_len(tx_buffer, len),
			tx_buffer->next_to_watch,
			(u64)tx_buffer->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * 82598 Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 |  PAYLEN  | POPTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN  |
	 *   +--------------------------------------------------------------+
	 *   63       46 45    40 39 36 35 32 31   24 23 20 19              0
	 *
	 * 82598 Advanced Transmit Descriptor (Write-Back Format)
	 *   +--------------------------------------------------------------+
	 * 0 |                          RSV [63:0]                          |
	 *   +--------------------------------------------------------------+
	 * 8 |            RSV           |  STA  |          NXTSEQ           |
	 *   +--------------------------------------------------------------+
	 *   63                       36 35   32 31                         0
	 *
	 * 82599+ Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 |PAYLEN  |POPTS|CC|IDX  |STA  |DCMD  |DTYP |MAC  |RSV  |DTALEN  |
	 *   +--------------------------------------------------------------+
	 *   63     46 45 40 39 38 36 35 32 31  24 23 20 19 18 17 16 15     0
	 *
	 * 82599+ Advanced Transmit Descriptor (Write-Back Format)
	 *   +--------------------------------------------------------------+
	 * 0 |                          RSV [63:0]                          |
	 *   +--------------------------------------------------------------+
	 * 8 |            RSV           |  STA  |           RSV             |
	 *   +--------------------------------------------------------------+
	 *   63                       36 35   32 31                         0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] "
			"[PlPOIdStDDt Ln] [bi->dma       ] "
			"leng ntw timestamp bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			tx_desc = IXGBE_TX_DESC(tx_ring, i);
			tx_buffer = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			pr_info("T [0x%03X]    %016llX %016llX %016llX"
				" %04X %p %016llX %p", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)dma_unmap_addr(tx_buffer, dma),
				dma_unmap_len(tx_buffer, len),
				tx_buffer->next_to_watch,
				(u64)tx_buffer->time_stamp,
				tx_buffer->skb);
			if (i == tx_ring->next_to_use &&
				i == tx_ring->next_to_clean)
				pr_cont(" NTC/U\n");
			else if (i == tx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == tx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");

			if (netif_msg_pktdata(adapter) &&
			    tx_buffer->skb)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS, 16, 1,
					tx_buffer->skb->data,
					dma_unmap_len(tx_buffer, len),
					true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("%5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Receive Descriptor Formats
	 *
	 * 82598 Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * 82598 Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 16 15   4 3     0
	 *   +------------------------------------------------------+
	 * 0 |       RSS Hash /  |SPH| HDR_LEN  | RSV |Packet|  RSS |
	 *   | Packet   | IP     |   |          |     | Type | Type |
	 *   | Checksum | Ident  |   |          |     |      |      |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 *
	 * 82599+ Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * 82599+ Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 |RSS / Frag Checksum|SPH| HDR_LEN  |RSC- |Packet|  RSS |
	 *   |/ RTT / PCoE_PARAM |   |          | CNT | Type | Type |
	 *   |/ Flow Dir Flt ID  |   |          |     |      |      |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31          20 19                 0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] "
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
			"<-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] "
			"[vl er S cks ln] ---------------- [bi->skb] "
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			rx_buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IXGBE_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & IXGBE_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("RWB[0x%03X]     %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					rx_buffer_info->skb);
			} else {
				pr_info("R  [0x%03X]     %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)rx_buffer_info->dma,
					rx_buffer_info->skb);

				if (netif_msg_pktdata(adapter) &&
				    rx_buffer_info->dma) {
					print_hex_dump(KERN_INFO, "",
					   DUMP_PREFIX_ADDRESS, 16, 1,
					   page_address(rx_buffer_info->page) +
						    rx_buffer_info->page_offset,
					   ixgbe_rx_bufsz(rx_ring), true);
				}
			}

			if (i == rx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == rx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");

		}
	}

exit:
	return;
}

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

/**
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
			   u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
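		/* each 82598 IVAR register packs four 8-bit entries; Rx
		 * causes use entries 0-63 and Tx causes 64-127, so entry
		 * (direction * 64 + queue) lives in register entry >> 2
		 * at byte offset queue & 0x3
		 */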
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
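			/* each 82599/X540 IVAR register covers two queues:
			 * the low and high 16 bits map the even and odd
			 * queue, with the Rx byte at offset 0 and the Tx
			 * byte at offset 8 within each half
			 */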
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}

static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
					  u64 qmask)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
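		/* these MACs spread the 64 queue interrupt causes across
		 * two 32-bit EICS registers, so trigger them in two writes
		 */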
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
}

void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
				      struct ixgbe_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	int i;
	u32 data;

	if ((hw->fc.current_mode != ixgbe_fc_full) &&
	    (hw->fc.current_mode != ixgbe_fc_rx_pause))
		return;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		break;
	default:
		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	}
	hwstats->lxoffrxc += data;

	/* refill credits (no tx hang) if we received xoff */
	if (!data)
		return;

	for (i = 0; i < adapter->num_tx_queues; i++)
		clear_bit(__IXGBE_HANG_CHECK_ARMED,
			  &adapter->tx_ring[i]->state);
}

static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	u32 xoff[8] = {0};
	int i;
	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;

	if (adapter->ixgbe_ieee_pfc)
		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);

	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
		ixgbe_update_xoff_rx_lfc(adapter);
		return;
	}

	/* update stats for each tc, only valid with PFC enabled */
	for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
			break;
		default:
			xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		}
		hwstats->pxoffrxc[i] += xoff[i];
	}

	/* disarm tx queues that have received xoff frames */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
		u8 tc = tx_ring->dcb_tc;

		if (xoff[tc])
			clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}
}

static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
{
	return ring->stats.packets;
}

static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
	u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));

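	/* pending = descriptors software has queued at tail that the
	 * hardware head pointer has not yet consumed, with wraparound
	 */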
	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
	u32 tx_done = ixgbe_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
	bool ret = false;

	clear_check_for_tx_hang(tx_ring);

	/*
	 * Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang. The
	 * bit is cleared if a pause frame is received to remove
	 * false hang detection due to PFC or 802.3x frames. By
	 * requiring this to fail twice we avoid races with
	 * pfc clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
				       &tx_ring->state);
	} else {
		/* update completed stats and continue */
		tx_ring->tx_stats.tx_done_old = tx_done;
		/* reset the countdown */
		clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}

	return ret;
}

/**
 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
 * @adapter: driver private struct
 **/
static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
{

	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
		ixgbe_service_event_schedule(adapter);
	}
}

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *tx_ring)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;
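
	/* i is biased by -tx_ring->count so the ring-wrap check in the
	 * loop below reduces to a simple !i test
	 */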
	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP))
			ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb);

		/* free the skb */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		struct ixgbe_hw *hw = &adapter->hw;
		e_err(drv, "Detected Tx Unit Hang\n"
			"  Tx Queue             <%d>\n"
			"  TDH, TDT             <%x>, <%x>\n"
			"  next_to_use          <%x>\n"
			"  next_to_clean        <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp           <%lx>\n"
			"  jiffies              <%lx>\n",
			tx_ring->queue_index,
			IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
			IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
			tx_ring->next_to_use, i,
			tx_ring->tx_buffer_info[i].time_stamp, jiffies);

		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

		e_info(probe,
		       "tx hang %d detected on queue %d, resetting adapter\n",
		       adapter->tx_timeout_count + 1, tx_ring->queue_index);

		/* schedule immediate reset if we believe we hung */
		ixgbe_tx_timeout_reset(adapter);

		/* the adapter is about to reset, no point in enabling stuff */
		return true;
	}

	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index)
		    && !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
	u16 reg_offset;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
		txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
		break;
	default:
		/* for unknown hardware do not write register */
		return;
	}

	/*
	 * We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
		  IXGBE_DCA_TXCTRL_DATA_RRO_EN |
		  IXGBE_DCA_TXCTRL_DESC_DCA_EN;

	IXGBE_WRITE_REG(hw, reg_offset, txctrl);
}

static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxctrl = dca3_get_tag(rx_ring->dev, cpu);
	u8 reg_idx = rx_ring->reg_idx;


	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
		break;
	default:
		break;
	}

	/*
	 * We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
		  IXGBE_DCA_RXCTRL_DATA_DCA_EN |
		  IXGBE_DCA_RXCTRL_DESC_DCA_EN;

	IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}

static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	ixgbe_for_each_ring(ring, q_vector->tx)
		ixgbe_update_tx_dca(adapter, ring, cpu);

	ixgbe_for_each_ring(ring, q_vector->rx)
		ixgbe_update_rx_dca(adapter, ring, cpu);

	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	/* always use CB2 mode, difference is masked in the CB driver */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		ixgbe_update_dca(adapter->q_vector[i]);
	}
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
	unsigned long event = *(unsigned long *)data;

	if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
		return 0;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_IXGBE_DCA */
static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
				 union ixgbe_adv_rx_desc *rx_desc,
				 struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
}

#ifdef IXGBE_FCOE
/**
 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
 * @ring: structure containing ring specific data
 * @rx_desc: advanced rx descriptor
 *
 * Returns : true if it is FCoE pkt
 */
static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
				    union ixgbe_adv_rx_desc *rx_desc)
{
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

	return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
	       ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
		(cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
			     IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
}

#endif /* IXGBE_FCOE */
/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
	    ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
		return;

	if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

		/*
		 * 82599 errata, UDP frames with a 0 checksum can be marked as
		 * checksum errors.
		 */
		if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
		    test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
			return;

		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
				    struct ixgbe_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma = bi->dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(dma))
		return true;

	/* alloc new page for storage */
	if (likely(!page)) {
		page = __skb_alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
					 bi->skb, ixgbe_rx_pg_order(rx_ring));
		if (unlikely(!page)) {
			rx_ring->rx_stats.alloc_rx_page_failed++;
			return false;
		}
		bi->page = page;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0,
			   ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);

	/*
	 * if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ixgbe_rx_pg_order(rx_ring));
		bi->page = NULL;

		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page_offset = 0;

	return true;
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;
1242
Alexander Duyckf8003262012-03-03 02:35:52 +00001243 do {
1244 if (!ixgbe_alloc_mapped_page(rx_ring, bi))
Alexander Duyckf990b792012-01-31 02:59:34 +00001245 break;
Auke Kok9a799d72007-09-15 14:07:45 -07001246
Alexander Duyckf8003262012-03-03 02:35:52 +00001247 /*
1248 * Refresh the desc even if buffer_addrs didn't change
1249 * because each write-back erases this info.
1250 */
1251 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
Auke Kok9a799d72007-09-15 14:07:45 -07001252
Alexander Duyckf990b792012-01-31 02:59:34 +00001253 rx_desc++;
1254 bi++;
Auke Kok9a799d72007-09-15 14:07:45 -07001255 i++;
Alexander Duyckf990b792012-01-31 02:59:34 +00001256 if (unlikely(!i)) {
Alexander Duycke4f74022012-01-31 02:59:44 +00001257 rx_desc = IXGBE_RX_DESC(rx_ring, 0);
Alexander Duyckf990b792012-01-31 02:59:34 +00001258 bi = rx_ring->rx_buffer_info;
1259 i -= rx_ring->count;
1260 }
1261
1262 /* clear the hdr_addr for the next_to_use descriptor */
1263 rx_desc->read.hdr_addr = 0;
Alexander Duyckf8003262012-03-03 02:35:52 +00001264
1265 cleaned_count--;
1266 } while (cleaned_count);
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07001267
Alexander Duyckf990b792012-01-31 02:59:34 +00001268 i += rx_ring->count;
1269
Alexander Duyckf56e0cb2012-01-31 02:59:39 +00001270 if (rx_ring->next_to_use != i)
Alexander Duyck84ea2592010-11-16 19:26:49 -08001271 ixgbe_release_rx_desc(rx_ring, i);
Auke Kok9a799d72007-09-15 14:07:45 -07001272}
1273
/**
 * ixgbe_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
 * @data: pointer to the start of the headers
 * @max_len: total length of section to find headers in
 *
 * This function is meant to determine the length of headers that will
 * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
 * motivation of doing this is to only perform one pull for IPv4 TCP
 * packets so that we can do basic things like calculating the gso_size
 * based on the average data per packet.
 **/
static unsigned int ixgbe_get_headlen(unsigned char *data,
				      unsigned int max_len)
{
	union {
		unsigned char *network;
		/* l2 headers */
		struct ethhdr *eth;
		struct vlan_hdr *vlan;
		/* l3 headers */
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
	__be16 protocol;
	u8 nexthdr = 0;	/* default to not TCP */
	u8 hlen;

	/* this should never happen, but better safe than sorry */
	if (max_len < ETH_HLEN)
		return max_len;

	/* initialize network frame pointer */
	hdr.network = data;

	/* set first protocol and move network header forward */
	protocol = hdr.eth->h_proto;
	hdr.network += ETH_HLEN;

	/* handle any vlan tag if present */
	if (protocol == __constant_htons(ETH_P_8021Q)) {
		if ((hdr.network - data) > (max_len - VLAN_HLEN))
			return max_len;

		protocol = hdr.vlan->h_vlan_encapsulated_proto;
		hdr.network += VLAN_HLEN;
	}

	/* handle L3 protocols */
	if (protocol == __constant_htons(ETH_P_IP)) {
		if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
			return max_len;

		/* access ihl as a u8 to avoid unaligned access on ia64 */
		hlen = (hdr.network[0] & 0x0F) << 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct iphdr))
			return hdr.network - data;

		/* record next protocol */
		nexthdr = hdr.ipv4->protocol;
		hdr.network += hlen;
	} else if (protocol == __constant_htons(ETH_P_IPV6)) {
		if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
			return max_len;

		/* record next protocol */
		nexthdr = hdr.ipv6->nexthdr;
		hdr.network += sizeof(struct ipv6hdr);
#ifdef IXGBE_FCOE
	} else if (protocol == __constant_htons(ETH_P_FCOE)) {
		if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
			return max_len;
		hdr.network += FCOE_HEADER_LEN;
#endif
	} else {
		return hdr.network - data;
	}

	/* finally sort out TCP/UDP */
	if (nexthdr == IPPROTO_TCP) {
		if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
			return max_len;

		/* access doff as a u8 to avoid unaligned access on ia64 */
		hlen = (hdr.network[12] & 0xF0) >> 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct tcphdr))
			return hdr.network - data;

		hdr.network += hlen;
	} else if (nexthdr == IPPROTO_UDP) {
		if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
			return max_len;

		hdr.network += sizeof(struct udphdr);
	}

	/*
	 * If everything has gone correctly hdr.network should be the
	 * data section of the packet and will be the end of the header.
	 * If not then it probably represents the end of the last recognized
	 * header.
	 */
	if ((hdr.network - data) < max_len)
		return hdr.network - data;
	else
		return max_len;
}

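/**
 * ixgbe_set_rsc_gso_size - compute a gso_size for an RSC coalesced frame
 * @ring: ring the frame was received on
 * @skb: coalesced skb whose gso_size is being set
 *
 * Approximates the MSS of the original stream by dividing the payload
 * evenly (rounding up) across the number of coalesced descriptors.
 **/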
static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
				   struct sk_buff *skb)
{
	u16 hdr_len = skb_headlen(skb);

	/* set gso_size to avoid messing up TCP MSS */
	skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
						 IXGBE_CB(skb)->append_cnt);
}

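/**
 * ixgbe_update_rsc_stats - update RSC counters and fix up the skb
 * @rx_ring: ring the RSC frame was received on
 * @skb: skb being updated
 *
 * Bumps the ring's RSC statistics, derives gso_size from the append
 * count, and then clears the append count for the next frame.
 **/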
static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
				   struct sk_buff *skb)
{
	/* if append_cnt is 0 then frame is not RSC */
	if (!IXGBE_CB(skb)->append_cnt)
		return;

	rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
	rx_ring->rx_stats.rsc_flush++;

	ixgbe_set_rsc_gso_size(rx_ring, skb);

	/* gso_size is computed using append_cnt so always clear it last */
	IXGBE_CB(skb)->append_cnt = 0;
}

/**
 * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;

	ixgbe_update_rsc_stats(rx_ring, skb);

	ixgbe_rx_hash(rx_ring, rx_desc, skb);

	ixgbe_rx_checksum(rx_ring, rx_desc, skb);

	ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);

	if ((dev->features & NETIF_F_HW_VLAN_RX) &&
	    ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
		__vlan_hwaccel_put_tag(skb, vid);
	}

	skb_record_rx_queue(skb, rx_ring->queue_index);

	skb->protocol = eth_type_trans(skb, dev);
}

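/**
 * ixgbe_rx_skb - hand a completed packet up the network stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 *
 * Uses GRO via napi_gro_receive() in the normal case, falling back to
 * netif_rx() when running in netpoll context.
 **/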
static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
			 struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(&q_vector->napi, skb);
	else
		netif_rx(skb);
}

/**
 * ixgbe_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
			     union ixgbe_adv_rx_desc *rx_desc,
			     struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IXGBE_RX_DESC(rx_ring, ntc));

	/* update RSC append count if present */
	if (ring_is_rsc_enabled(rx_ring)) {
		__le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
				     cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);

		if (unlikely(rsc_enabled)) {
			u32 rsc_cnt = le32_to_cpu(rsc_enabled);

			rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
			IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;

			/* update ntc based on RSC value */
			ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
			ntc &= IXGBE_RXDADV_NEXTP_MASK;
			ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
		}
	}

	/* if we are the last buffer then there is nothing else to do */
	if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buffer_info[ntc].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
 * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being adjusted
 *
 * This function is an ixgbe specific version of __pskb_pull_tail.  The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 */
static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
			    struct sk_buff *skb)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned char *va;
	unsigned int pull_len;

	/*
	 * it is valid to use page_address instead of kmap since we are
	 * working with pages allocated out of the low memory pool via
	 * alloc_page(GFP_ATOMIC)
	 */
	va = skb_frag_address(frag);

	/*
	 * we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	skb_frag_size_sub(frag, pull_len);
	frag->page_offset += pull_len;
	skb->data_len -= pull_len;
	skb->tail += pull_len;
}

/**
 * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being updated
 *
 * This function provides a basic DMA sync up for the first fragment of an
 * skb.  The reason for doing this is that the first fragment cannot be
 * unmapped until we have reached the end of packet descriptor for a buffer
 * chain.
 */
static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
				struct sk_buff *skb)
{
	/* if the page was released unmap it, else just sync our portion */
	if (unlikely(IXGBE_CB(skb)->page_released)) {
		dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
		IXGBE_CB(skb)->page_released = false;
	} else {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];

		dma_sync_single_range_for_cpu(rx_ring->dev,
					      IXGBE_CB(skb)->dma,
					      frag->page_offset,
					      ixgbe_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);
	}
	IXGBE_CB(skb)->dma = 0;
}

/**
 * ixgbe_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
				  union ixgbe_adv_rx_desc *rx_desc,
				  struct sk_buff *skb)
{
	struct net_device *netdev = rx_ring->netdev;

	/* verify that the packet does not have any known errors */
	if (unlikely(ixgbe_test_staterr(rx_desc,
					IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
	    !(netdev->features & NETIF_F_RXALL))) {
		dev_kfree_skb_any(skb);
		return true;
	}

	/* place header in linear portion of buffer */
	if (skb_is_nonlinear(skb))
		ixgbe_pull_tail(rx_ring, skb);

#ifdef IXGBE_FCOE
	/* do not attempt to pad FCoE Frames as this will disrupt DDP */
	if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
		return false;

#endif
	/* if skb_pad returns an error the skb was freed */
	if (unlikely(skb->len < 60)) {
		int pad_len = 60 - skb->len;

		if (skb_pad(skb, pad_len))
			return true;
		__skb_put(skb, pad_len);
	}

	return false;
}

/**
 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
				struct ixgbe_rx_buffer *old_buff)
{
	struct ixgbe_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->page = old_buff->page;
	new_buff->dma = old_buff->dma;
	new_buff->page_offset = old_buff->page_offset;

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
					 new_buff->page_offset,
					 ixgbe_rx_bufsz(rx_ring),
					 DMA_FROM_DEVICE);
}

/**
 * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 **/
static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
			      struct ixgbe_rx_buffer *rx_buffer,
			      union ixgbe_adv_rx_desc *rx_desc,
			      struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
#else
	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
	unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
				   ixgbe_rx_bufsz(rx_ring);
#endif

	if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
		unsigned char *va = page_address(page) + rx_buffer->page_offset;

		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* we can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(page) == numa_node_id()))
			return true;

		/* this page cannot be reused so discard it */
		put_page(page);
		return false;
	}

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			rx_buffer->page_offset, size, truesize);

	/* avoid re-using remote pages */
	if (unlikely(page_to_nid(page) != numa_node_id()))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= truesize;

	/*
	 * since we are the only owner of the page and we need to
	 * increment it, just set the value to 2 in order to avoid
	 * an unnecessary locked operation
	 */
	atomic_set(&page->_count, 2);
#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > last_offset)
		return false;

	/* bump ref count on page before it is given to the stack */
	get_page(page);
#endif

	return true;
}

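/**
 * ixgbe_fetch_rx_buffer - fetch the current Rx buffer and build/extend an skb
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: descriptor for the buffer being fetched
 *
 * Allocates an skb for the buffer if one is not already in progress,
 * synchronizes or unmaps the page as needed, and attaches the page data
 * to the skb.  Returns the skb, or NULL on allocation failure.
 **/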
static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
					     union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbe_rx_buffer *rx_buffer;
	struct sk_buff *skb;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	skb = rx_buffer->skb;

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						IXGBE_RX_HDR_SIZE);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			return NULL;
		}

		/*
		 * we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);

		/*
		 * Delay unmapping of the first packet. It carries the
		 * header information, HW may still access the header
		 * after the writeback.  Only unmap it when EOP is
		 * reached
		 */
		if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
			goto dma_sync;

		IXGBE_CB(skb)->dma = rx_buffer->dma;
	} else {
		if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
			ixgbe_dma_sync_frag(rx_ring, skb);

dma_sync:
		/* we are reusing so sync this buffer for CPU use */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_buffer->dma,
					      rx_buffer->page_offset,
					      ixgbe_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);
	}

	/* pull page into skb */
	if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		ixgbe_reuse_rx_page(rx_ring, rx_buffer);
	} else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
		/* the page has been released from the ring */
		IXGBE_CB(skb)->page_released = true;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       ixgbe_rx_pg_size(rx_ring),
			       DMA_FROM_DEVICE);
	}

	/* clear contents of buffer_info */
	rx_buffer->skb = NULL;
	rx_buffer->dma = 0;
	rx_buffer->page = NULL;

	return skb;
}

/**
 * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns true if all work is completed without reaching budget
 **/
static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *rx_ring,
			       const int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
	struct ixgbe_adapter *adapter = q_vector->adapter;
	int ddp_bytes;
	unsigned int mss = 0;
#endif /* IXGBE_FCOE */
	u16 cleaned_count = ixgbe_desc_unused(rx_ring);

	do {
		union ixgbe_adv_rx_desc *rx_desc;
		struct sk_buff *skb;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
			break;

		/*
		 * This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STAT_DD bit is set
		 */
		rmb();

		/* retrieve a buffer from the ring */
		skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		cleaned_count++;

		/* place incomplete frames back on ring for completion */
		if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		/* verify the packet layout is correct */
		if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
			continue;

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, timestamp, VLAN, and protocol */
		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);

#ifdef IXGBE_FCOE
		/* if ddp, not passing to ULD unless for FCP_RSP or error */
		if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
			/* include DDPed FCoE data */
			if (ddp_bytes > 0) {
				if (!mss) {
					mss = rx_ring->netdev->mtu -
						sizeof(struct fcoe_hdr) -
						sizeof(struct fc_frame_header) -
						sizeof(struct fcoe_crc_eof);
					if (mss > 512)
						mss &= ~511;
				}
				total_rx_bytes += ddp_bytes;
				total_rx_packets += DIV_ROUND_UP(ddp_bytes,
								 mss);
			}
			if (!ddp_bytes) {
				dev_kfree_skb_any(skb);
				continue;
			}
		}

#endif /* IXGBE_FCOE */
		ixgbe_rx_skb(q_vector, skb);

		/* update budget accounting */
		total_rx_packets++;
	} while (likely(total_rx_packets < budget));

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	if (cleaned_count)
		ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);

	return (total_rx_packets < budget);
}

/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector;
	int v_idx;
	u32 mask;

	/* Populate MSIX to EITR Select */
	if (adapter->num_vfs > 32) {
		u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
	}

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct ixgbe_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbe_for_each_ring(ring, q_vector->rx)
			ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbe_for_each_ring(ring, q_vector->tx)
			ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		ixgbe_write_eitr(q_vector);
	}

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
			       v_idx);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		ixgbe_set_ivar(adapter, -1, 1, v_idx);
		break;
	default:
		break;
	}
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER |
		  IXGBE_EIMS_MAILBOX |
		  IXGBE_EIMS_LSC);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}

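/*
 * Latency classes used by the adaptive ITR logic below; each class maps
 * to a target interrupt rate in ixgbe_set_itr().
 */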
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see ixgbe_param.c)
 **/
static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
			     struct ixgbe_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttlerate management
	 *   0-10MB/s   lowest (100000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}

/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */
void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* must write high and low 16 bits to reset counter */
		itr_reg |= (itr_reg << 16);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		/*
		 * set the WDIS bit to not clear the timer bits and cause an
		 * immediate assertion of the interrupt
		 */
		itr_reg |= IXGBE_EITR_CNT_WDIS;
		break;
	default:
		break;
	}
	IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
}

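/**
 * ixgbe_set_itr - pick a new EITR value from the per-ring latency classes
 * @q_vector: structure containing interrupt and ring information
 *
 * Takes the higher (more bulk-oriented) of the Rx and Tx latency classes,
 * maps it to a target interrupt rate, and blends the new target with the
 * current value before writing it to hardware, so the rate ramps gradually
 * instead of jumping straight between 8K, 20K, and 100K ints/s.
 **/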
static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbe_update_itr(q_vector, &q_vector->tx);
	ixgbe_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
		new_itr = IXGBE_8K_ITR;
		break;
	default:
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbe_write_eitr(q_vector);
	}
}

/**
 * ixgbe_check_overtemp_subtask - check for over temperature
 * @adapter: pointer to adapter
 **/
static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr = adapter->interrupt_event;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return;

	if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
	    !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
		return;

	adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_T3_LOM:
		/*
		 * The warning interrupt is shared by both ports, so we
		 * cannot assume it was for our port.  We may also have
		 * missed the interrupt entirely, so always check for an
		 * LSC as well.
		 */
		if (!(eicr & IXGBE_EICR_GPI_SDP0) &&
		    !(eicr & IXGBE_EICR_LSC))
			return;

		if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
			u32 autoneg;
			bool link_up = false;

			hw->mac.ops.check_link(hw, &autoneg, &link_up, false);

			if (link_up)
				return;
		}

		/* Check if this is not due to overtemp */
		if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
			return;

		break;
	default:
		if (!(eicr & IXGBE_EICR_GPI_SDP0))
			return;
		break;
	}
	e_crit(drv,
	       "Network adapter has been stopped because it has overheated. "
	       "Restart the computer. If the problem persists, "
	       "power off the system and replace the adapter\n");

	adapter->interrupt_event = 0;
}

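/**
 * ixgbe_check_fan_failure - check SDP1 for a fan failure indication
 * @adapter: pointer to adapter
 * @eicr: interrupt cause register value
 *
 * Logs a critical message and clears the interrupt when the fan fail
 * GPI fires on fan-capable hardware.
 **/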
static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		e_crit(probe, "Fan has stopped, replace the adapter\n");
		/* write to clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
	}
}

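/**
 * ixgbe_check_overtemp_event - handle a possible overtemp interrupt
 * @adapter: pointer to adapter
 * @eicr: interrupt cause register value
 *
 * On 82599 the event is recorded and deferred to the service task,
 * since the link state must be checked first; on X540 the thermal
 * sensor bit is definitive and the overheat is reported immediately.
 **/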
static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
	if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
		return;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		/*
		 * Need to check link state so complete overtemp check
		 * on service task
		 */
		if (((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)) &&
		    (!test_bit(__IXGBE_DOWN, &adapter->state))) {
			adapter->interrupt_event = eicr;
			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
			ixgbe_service_event_schedule(adapter);
			return;
		}
		return;
	case ixgbe_mac_X540:
		if (!(eicr & IXGBE_EICR_TS))
			return;
		break;
	default:
		return;
	}

	e_crit(drv,
	       "Network adapter has been stopped because it has overheated. "
	       "Restart the computer. If the problem persists, "
	       "power off the system and replace the adapter\n");
}

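/**
 * ixgbe_check_sfp_event - handle SFP+ module and link config interrupts
 * @adapter: pointer to adapter
 * @eicr: interrupt cause register value
 *
 * Clears the SDP interrupts and schedules the service task, flagging
 * the SFP for reset (SDP2) or the link for reconfiguration (SDP1).
 **/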
static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (eicr & IXGBE_EICR_GPI_SDP2) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
			adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
			ixgbe_service_event_schedule(adapter);
		}
	}

	if (eicr & IXGBE_EICR_GPI_SDP1) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
			adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
			ixgbe_service_event_schedule(adapter);
		}
	}
}

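/**
 * ixgbe_check_lsc - handle a link status change interrupt
 * @adapter: pointer to adapter
 *
 * Records the event, masks further LSC interrupts until the link has
 * been rechecked, and schedules the service task to update the link.
 **/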
static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->lsc_int++;
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		IXGBE_WRITE_FLUSH(hw);
		ixgbe_service_event_schedule(adapter);
	}
}

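/**
 * ixgbe_irq_enable_queues - enable interrupts for the given queue vectors
 * @adapter: board private structure
 * @qmask: bitmask of queue vectors to enable
 *
 * 82598 parts use a single EIMS register; 82599/X540 split the 64-bit
 * mask across EIMS_EX(0) and EIMS_EX(1).  The register write is not
 * flushed here; callers flush when needed.
 **/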
static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
					   u64 qmask)
{
	u32 mask;
	struct ixgbe_hw *hw = &adapter->hw;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		mask = (qmask & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (qmask >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
		break;
	default:
		break;
	}
	/* skip the flush */
}

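/**
 * ixgbe_irq_disable_queues - disable interrupts for the given queue vectors
 * @adapter: board private structure
 * @qmask: bitmask of queue vectors to disable
 *
 * Mirror of ixgbe_irq_enable_queues(), writing the EIMC registers
 * instead of EIMS.  The register write is not flushed here either.
 **/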
static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
					    u64 qmask)
{
	u32 mask;
	struct ixgbe_hw *hw = &adapter->hw;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		mask = (qmask & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (qmask >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
		break;
	default:
		break;
	}
	/* skip the flush */
}

/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @queues: also enable all queue interrupts
 * @flush: flush register writes when done
 **/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
				    bool flush)
{
	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* don't re-enable LSC while waiting for link */
	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
		mask &= ~IXGBE_EIMS_LSC;

	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
			mask |= IXGBE_EIMS_GPI_SDP0;
			break;
		case ixgbe_mac_X540:
			mask |= IXGBE_EIMS_TS;
			break;
		default:
			break;
		}
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
		mask |= IXGBE_EIMS_GPI_SDP1;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		/* fall through */
	case ixgbe_mac_X540:
		mask |= IXGBE_EIMS_ECC;
		mask |= IXGBE_EIMS_MAILBOX;
		break;
	default:
		break;
	}

	if (adapter->hw.mac.type == ixgbe_mac_X540)
		mask |= IXGBE_EIMS_TIMESYNC;

	if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
	    !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
	if (queues)
		ixgbe_irq_enable_queues(adapter, ~0);
	if (flush)
		IXGBE_WRITE_FLUSH(&adapter->hw);
}

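/**
 * ixgbe_msix_other - MSI-X handler for the non-queue (other causes) vector
 * @irq: interrupt number
 * @data: pointer to our adapter struct
 *
 * Handles link state changes, mailbox, ECC, Flow Director, SFP, thermal
 * and PTP events, then re-enables the non-queue interrupt sources.
 **/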
static irqreturn_t ixgbe_msix_other(int irq, void *data)
{
	struct ixgbe_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;

	/*
	 * Workaround for Silicon errata.  Use clear-by-write instead
	 * of clear-by-read.  Reading with EICS will return the
	 * interrupt causes without clearing, which is later done
	 * with the write to EICR.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	if (eicr & IXGBE_EICR_MAILBOX)
		ixgbe_msg_task(adapter);

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (eicr & IXGBE_EICR_ECC)
			e_info(link, "Received unrecoverable ECC Err, please "
			       "reboot\n");
		/* Handle Flow Director Full threshold interrupt */
		if (eicr & IXGBE_EICR_FLOW_DIR) {
			int reinit_count = 0;
			int i;
			for (i = 0; i < adapter->num_tx_queues; i++) {
				struct ixgbe_ring *ring = adapter->tx_ring[i];
				if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
						       &ring->state))
					reinit_count++;
			}
			if (reinit_count) {
				/* no more flow director interrupts until after init */
				IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
				adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
				ixgbe_service_event_schedule(adapter);
			}
		}
		ixgbe_check_sfp_event(adapter, eicr);
		ixgbe_check_overtemp_event(adapter, eicr);
		break;
	default:
		break;
	}

	ixgbe_check_fan_failure(adapter, eicr);

	if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
		ixgbe_ptp_check_pps_event(adapter, eicr);

	/* re-enable the original interrupt state, no lsc, no queues */
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter, false, false);

	return IRQ_HANDLED;
}

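/**
 * ixgbe_msix_clean_rings - MSI-X handler for a queue vector
 * @irq: interrupt number
 * @data: pointer to the q_vector that fired
 *
 * Simply schedules NAPI for the vector; the actual Tx/Rx cleanup
 * happens in ixgbe_poll().
 **/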
static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */

	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ixgbe_poll - NAPI Rx polling callback
 * @napi: structure for representing this polling device
 * @budget: how many packets driver is allowed to clean
 *
 * This function is used for legacy and MSI, NAPI mode
 **/
int ixgbe_poll(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
				container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		ixgbe_update_dca(q_vector);
#endif

	ixgbe_for_each_ring(ring, q_vector->tx)
		clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	ixgbe_for_each_ring(ring, q_vector->rx)
		clean_complete &= ixgbe_clean_rx_irq(q_vector, ring,
						     per_ring_budget);

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbe_set_itr(q_vector);
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));

	return 0;
}

2509/**
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002510 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
2511 * @adapter: board private structure
2512 *
2513 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
2514 * interrupts from the kernel.
2515 **/
2516static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2517{
2518 struct net_device *netdev = adapter->netdev;
Alexander Duyck207867f2011-07-15 03:05:37 +00002519 int vector, err;
Joe Perchese8e9f692010-09-07 21:34:53 +00002520 int ri = 0, ti = 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002521
Alexander Duyck49c7ffb2012-05-05 05:30:43 +00002522 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
Alexander Duyckd0759eb2010-11-16 19:27:09 -08002523 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
Alexander Duyck207867f2011-07-15 03:05:37 +00002524 struct msix_entry *entry = &adapter->msix_entries[vector];
Robert Olssoncb13fc22008-11-25 16:43:52 -08002525
Alexander Duyck4ff7fb12011-08-31 00:01:11 +00002526 if (q_vector->tx.ring && q_vector->rx.ring) {
Don Skidmore9fe93af2010-12-03 09:33:54 +00002527 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
Alexander Duyck4ff7fb12011-08-31 00:01:11 +00002528 "%s-%s-%d", netdev->name, "TxRx", ri++);
Alexander Duyck32aa77a2010-11-16 19:26:59 -08002529 ti++;
Alexander Duyck4ff7fb12011-08-31 00:01:11 +00002530 } else if (q_vector->rx.ring) {
2531 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2532 "%s-%s-%d", netdev->name, "rx", ri++);
2533 } else if (q_vector->tx.ring) {
2534 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2535 "%s-%s-%d", netdev->name, "tx", ti++);
Alexander Duyckd0759eb2010-11-16 19:27:09 -08002536 } else {
2537 /* skip this unused q_vector */
2538 continue;
Alexander Duyck32aa77a2010-11-16 19:26:59 -08002539 }
Alexander Duyck207867f2011-07-15 03:05:37 +00002540 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
2541 q_vector->name, q_vector);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002542 if (err) {
			e_err(probe, "request_irq failed for MSIX interrupt, "
			      "Error: %d\n", err);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002545 goto free_queue_irqs;
2546 }
Alexander Duyck207867f2011-07-15 03:05:37 +00002547 /* If Flow Director is enabled, set interrupt affinity */
2548 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
2549 /* assign the mask for this irq */
2550 irq_set_affinity_hint(entry->vector,
Alexander Duyckde88eee2012-02-08 07:49:59 +00002551 &q_vector->affinity_mask);
Alexander Duyck207867f2011-07-15 03:05:37 +00002552 }
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002553 }
2554
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002555 err = request_irq(adapter->msix_entries[vector].vector,
Alexander Duyck2c4af692011-07-15 07:29:55 +00002556 ixgbe_msix_other, 0, netdev->name, adapter);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002557 if (err) {
Alexander Duyckde88eee2012-02-08 07:49:59 +00002558 e_err(probe, "request_irq for msix_other failed: %d\n", err);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002559 goto free_queue_irqs;
2560 }
2561
2562 return 0;
2563
2564free_queue_irqs:
Alexander Duyck207867f2011-07-15 03:05:37 +00002565 while (vector) {
2566 vector--;
2567 irq_set_affinity_hint(adapter->msix_entries[vector].vector,
2568 NULL);
2569 free_irq(adapter->msix_entries[vector].vector,
2570 adapter->q_vector[vector]);
2571 }
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002572 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2573 pci_disable_msix(adapter->pdev);
2574 kfree(adapter->msix_entries);
2575 adapter->msix_entries = NULL;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002576 return err;
2577}
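
/*
 * Illustration only, not driver code: the snprintf calls above produce
 * the per-vector IRQ names that show up in /proc/interrupts. A small
 * userspace sketch (the device name "eth0" is a made-up example)
 * reproduces the "%s-%s-%d" format. Wrapped in #if 0; extract it to run.
 */
#if 0
#include <stdio.h>

int main(void)
{
	char name[24];	/* the driver sizes this off the q_vector; 24 is arbitrary */
	int ri = 0, ti = 0;

	snprintf(name, sizeof(name) - 1, "%s-%s-%d", "eth0", "TxRx", ri++);
	puts(name);	/* eth0-TxRx-0: vector with both ring types */
	ti++;
	snprintf(name, sizeof(name) - 1, "%s-%s-%d", "eth0", "rx", ri++);
	puts(name);	/* eth0-rx-1: Rx-only vector */
	snprintf(name, sizeof(name) - 1, "%s-%s-%d", "eth0", "tx", ti++);
	puts(name);	/* eth0-tx-1: Tx-only vector */
	return 0;
}
#endif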
2578
Alexey Dobriyan79aefa42008-11-19 14:17:02 -08002579/**
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002580 * ixgbe_intr - legacy mode Interrupt Handler
Auke Kok9a799d72007-09-15 14:07:45 -07002581 * @irq: interrupt number
2582 * @data: pointer to a network interface device structure
Auke Kok9a799d72007-09-15 14:07:45 -07002583 **/
2584static irqreturn_t ixgbe_intr(int irq, void *data)
2585{
Alexander Duycka65151ba22011-05-27 05:31:32 +00002586 struct ixgbe_adapter *adapter = data;
Auke Kok9a799d72007-09-15 14:07:45 -07002587 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck7a921c92009-05-06 10:43:28 +00002588 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9a799d72007-09-15 14:07:45 -07002589 u32 eicr;
2590
Don Skidmore54037502009-02-21 15:42:56 -08002591 /*
Alexander Duyck24ddd962012-02-10 02:08:32 +00002592 * Workaround for silicon errata #26 on 82598. Mask the interrupt
Don Skidmore54037502009-02-21 15:42:56 -08002593 * before the read of EICR.
2594 */
2595 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
2596
	/* For NAPI, EIAM auto-masks the tx/rx interrupt bits on read,
	 * so no explicit interrupt disable is necessary */
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002599 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
Jesse Brandeburgf47cf662008-09-11 19:56:14 -07002600 if (!eicr) {
		/*
		 * Shared interrupt alert!
		 * Make sure interrupts are enabled, because the read will
		 * have disabled them due to EIAM. This also finishes the
		 * 82598 silicon errata workaround: unmask the interrupt
		 * that we masked before the EICR read.
		 */
2608 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2609 ixgbe_irq_enable(adapter, true, true);
Auke Kok9a799d72007-09-15 14:07:45 -07002610 return IRQ_NONE; /* Not our interrupt */
Jesse Brandeburgf47cf662008-09-11 19:56:14 -07002611 }
Auke Kok9a799d72007-09-15 14:07:45 -07002612
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07002613 if (eicr & IXGBE_EICR_LSC)
2614 ixgbe_check_lsc(adapter);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002615
Alexander Duyckbd508172010-11-16 19:27:03 -08002616 switch (hw->mac.type) {
2617 case ixgbe_mac_82599EB:
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002618 ixgbe_check_sfp_event(adapter, eicr);
Don Skidmore0ccb9742011-08-04 02:07:48 +00002619 /* Fall through */
2620 case ixgbe_mac_X540:
2621 if (eicr & IXGBE_EICR_ECC)
			e_info(link, "Received unrecoverable ECC error, "
			       "please reboot\n");
Jacob Keller4f51bf72011-08-20 04:49:45 +00002624 ixgbe_check_overtemp_event(adapter, eicr);
Alexander Duyckbd508172010-11-16 19:27:03 -08002625 break;
2626 default:
2627 break;
2628 }
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002629
Jesse Brandeburg0befdb32008-10-31 00:46:40 -07002630 ixgbe_check_fan_failure(adapter, eicr);
Jacob Kellerdb0677f2012-08-24 07:46:54 +00002631 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
2632 ixgbe_ptp_check_pps_event(adapter, eicr);
Jesse Brandeburg0befdb32008-10-31 00:46:40 -07002633
Alexander Duyckb9f6ed22012-02-08 07:49:54 +00002634 /* would disable interrupts here but EIAM disabled it */
2635 napi_schedule(&q_vector->napi);
Auke Kok9a799d72007-09-15 14:07:45 -07002636
Emil Tantilov6af3b9e2010-09-29 21:35:23 +00002637 /*
	 * re-enable link (if needed) and non-queue interrupts without a
	 * flush; ixgbe_poll will re-enable the queue interrupts
2640 */
Emil Tantilov6af3b9e2010-09-29 21:35:23 +00002641 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2642 ixgbe_irq_enable(adapter, false, false);
2643
Auke Kok9a799d72007-09-15 14:07:45 -07002644 return IRQ_HANDLED;
2645}
2646
2647/**
2648 * ixgbe_request_irq - initialize interrupts
2649 * @adapter: board private structure
2650 *
2651 * Attempts to configure interrupts using the best available
2652 * capabilities of the hardware and kernel.
2653 **/
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002654static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
Auke Kok9a799d72007-09-15 14:07:45 -07002655{
2656 struct net_device *netdev = adapter->netdev;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002657 int err;
Auke Kok9a799d72007-09-15 14:07:45 -07002658
Alexander Duyck4cc6df22011-07-15 03:05:51 +00002659 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002660 err = ixgbe_request_msix_irqs(adapter);
Alexander Duyck4cc6df22011-07-15 03:05:51 +00002661 else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
Joe Perchesa0607fd2009-11-18 23:29:17 -08002662 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
Alexander Duycka65151ba22011-05-27 05:31:32 +00002663 netdev->name, adapter);
Alexander Duyck4cc6df22011-07-15 03:05:51 +00002664 else
Joe Perchesa0607fd2009-11-18 23:29:17 -08002665 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
Alexander Duycka65151ba22011-05-27 05:31:32 +00002666 netdev->name, adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002667
Alexander Duyckde88eee2012-02-08 07:49:59 +00002668 if (err)
Emil Tantilov396e7992010-07-01 20:05:12 +00002669 e_err(probe, "request_irq failed, Error %d\n", err);
Auke Kok9a799d72007-09-15 14:07:45 -07002670
Auke Kok9a799d72007-09-15 14:07:45 -07002671 return err;
2672}
2673
2674static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2675{
Alexander Duyck49c7ffb2012-05-05 05:30:43 +00002676 int vector;
Auke Kok9a799d72007-09-15 14:07:45 -07002677
Alexander Duyck49c7ffb2012-05-05 05:30:43 +00002678 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
Alexander Duycka65151ba22011-05-27 05:31:32 +00002679 free_irq(adapter->pdev->irq, adapter);
Alexander Duyck49c7ffb2012-05-05 05:30:43 +00002680 return;
Auke Kok9a799d72007-09-15 14:07:45 -07002681 }
Alexander Duyck49c7ffb2012-05-05 05:30:43 +00002682
2683 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
2684 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2685 struct msix_entry *entry = &adapter->msix_entries[vector];
2686
2687 /* free only the irqs that were actually requested */
2688 if (!q_vector->rx.ring && !q_vector->tx.ring)
2689 continue;
2690
2691 /* clear the affinity_mask in the IRQ descriptor */
2692 irq_set_affinity_hint(entry->vector, NULL);
2693
2694 free_irq(entry->vector, q_vector);
2695 }
2696
2697 free_irq(adapter->msix_entries[vector++].vector, adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002698}
2699
2700/**
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00002701 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
2702 * @adapter: board private structure
2703 **/
2704static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
2705{
Alexander Duyckbd508172010-11-16 19:27:03 -08002706 switch (adapter->hw.mac.type) {
2707 case ixgbe_mac_82598EB:
Nelson, Shannon835462f2009-04-27 22:42:54 +00002708 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
Alexander Duyckbd508172010-11-16 19:27:03 -08002709 break;
2710 case ixgbe_mac_82599EB:
Don Skidmoreb93a2222010-11-16 19:27:17 -08002711 case ixgbe_mac_X540:
Nelson, Shannon835462f2009-04-27 22:42:54 +00002712 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
2713 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00002714 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
Alexander Duyckbd508172010-11-16 19:27:03 -08002715 break;
2716 default:
2717 break;
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00002718 }
2719 IXGBE_WRITE_FLUSH(&adapter->hw);
2720 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
Alexander Duyck49c7ffb2012-05-05 05:30:43 +00002721 int vector;
2722
2723 for (vector = 0; vector < adapter->num_q_vectors; vector++)
2724 synchronize_irq(adapter->msix_entries[vector].vector);
2725
2726 synchronize_irq(adapter->msix_entries[vector++].vector);
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00002727 } else {
2728 synchronize_irq(adapter->pdev->irq);
2729 }
2730}
2731
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00002732/**
Auke Kok9a799d72007-09-15 14:07:45 -07002733 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 * @adapter: board private structure
2735 **/
2736static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
2737{
Emil Tantilovd5bf4f62011-08-31 00:01:16 +00002738 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9a799d72007-09-15 14:07:45 -07002739
Emil Tantilovd5bf4f62011-08-31 00:01:16 +00002740 ixgbe_write_eitr(q_vector);
Auke Kok9a799d72007-09-15 14:07:45 -07002741
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002742 ixgbe_set_ivar(adapter, 0, 0, 0);
2743 ixgbe_set_ivar(adapter, 1, 0, 0);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002744
Emil Tantilov396e7992010-07-01 20:05:12 +00002745 e_info(hw, "Legacy interrupt IVAR setup done\n");
Auke Kok9a799d72007-09-15 14:07:45 -07002746}
2747
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002748/**
2749 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
2750 * @adapter: board private structure
2751 * @ring: structure containing ring specific data
2752 *
2753 * Configure the Tx descriptor ring after a reset.
2754 **/
Alexander Duyck84418e32010-08-19 13:40:54 +00002755void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2756 struct ixgbe_ring *ring)
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002757{
2758 struct ixgbe_hw *hw = &adapter->hw;
2759 u64 tdba = ring->dma;
Alexander Duyck2f1860b2010-08-19 13:39:43 +00002760 int wait_loop = 10;
Alexander Duyckb88c6de2011-07-15 03:06:12 +00002761 u32 txdctl = IXGBE_TXDCTL_ENABLE;
Alexander Duyckbf29ee62010-11-16 19:27:07 -08002762 u8 reg_idx = ring->reg_idx;
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002763
Alexander Duyck2f1860b2010-08-19 13:39:43 +00002764 /* disable queue to avoid issues while updating state */
Alexander Duyckb88c6de2011-07-15 03:06:12 +00002765 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
Alexander Duyck2f1860b2010-08-19 13:39:43 +00002766 IXGBE_WRITE_FLUSH(hw);
2767
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002768 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
Joe Perchese8e9f692010-09-07 21:34:53 +00002769 (tdba & DMA_BIT_MASK(32)));
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002770 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
2771 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
2772 ring->count * sizeof(union ixgbe_adv_tx_desc));
2773 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
2774 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
Alexander Duyck84ea2592010-11-16 19:26:49 -08002775 ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002776
Alexander Duyckb88c6de2011-07-15 03:06:12 +00002777 /*
2778 * set WTHRESH to encourage burst writeback, it should not be set
2779 * higher than 1 when ITR is 0 as it could cause false TX hangs
2780 *
2781 * In order to avoid issues WTHRESH + PTHRESH should always be equal
2782 * to or less than the number of on chip descriptors, which is
2783 * currently 40.
2784 */
Alexander Duycke954b372012-02-08 07:49:38 +00002785 if (!ring->q_vector || (ring->q_vector->itr < 8))
Alexander Duyckb88c6de2011-07-15 03:06:12 +00002786 txdctl |= (1 << 16); /* WTHRESH = 1 */
2787 else
2788 txdctl |= (8 << 16); /* WTHRESH = 8 */
2789
Alexander Duycke954b372012-02-08 07:49:38 +00002790 /*
2791 * Setting PTHRESH to 32 both improves performance
2792 * and avoids a TX hang with DFP enabled
2793 */
Alexander Duyckb88c6de2011-07-15 03:06:12 +00002794 txdctl |= (1 << 8) | /* HTHRESH = 1 */
2795 32; /* PTHRESH = 32 */
Alexander Duyck2f1860b2010-08-19 13:39:43 +00002796
2797 /* reinitialize flowdirector state */
Alexander Duyck39cb6812012-06-06 05:38:20 +00002798 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
Alexander Duyckee9e0f02010-11-16 19:27:01 -08002799 ring->atr_sample_rate = adapter->atr_sample_rate;
2800 ring->atr_count = 0;
2801 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
2802 } else {
2803 ring->atr_sample_rate = 0;
2804 }
Alexander Duyck2f1860b2010-08-19 13:39:43 +00002805
John Fastabendc84d3242010-11-16 19:27:12 -08002806 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
2807
Alexander Duyck2f1860b2010-08-19 13:39:43 +00002808 /* enable queue */
Alexander Duyck2f1860b2010-08-19 13:39:43 +00002809 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
2810
2811 /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
2812 if (hw->mac.type == ixgbe_mac_82598EB &&
2813 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
2814 return;
2815
2816 /* poll to verify queue is enabled */
2817 do {
Don Skidmore032b4322011-03-18 09:32:53 +00002818 usleep_range(1000, 2000);
Alexander Duyck2f1860b2010-08-19 13:39:43 +00002819 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
2820 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
2821 if (!wait_loop)
2822 e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002823}
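
/*
 * Illustration only, not driver code: sketch of how the TXDCTL threshold
 * fields set above pack into one 32-bit register. The field positions
 * (PTHRESH in bits 6:0, HTHRESH in bits 14:8, WTHRESH in bits 22:16) are
 * our reading of the 82599 datasheet and are stated here as an
 * assumption. Wrapped in #if 0; extract it to run.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t txdctl = 0;
	int low_itr = 1;		/* hypothetical: q_vector->itr < 8 */

	txdctl |= low_itr ? (1 << 16)	/* WTHRESH = 1, avoids false hangs */
			  : (8 << 16);	/* WTHRESH = 8, bursts writeback */
	txdctl |= (1 << 8) |		/* HTHRESH = 1 */
		  32;			/* PTHRESH = 32 */

	printf("TXDCTL thresholds = 0x%08x\n", txdctl);	/* 0x00010120 */
	return 0;
}
#endif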
2824
Alexander Duyck120ff942010-08-19 13:34:50 +00002825static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
2826{
2827 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck671c0ad2012-05-18 06:34:02 +00002828 u32 rttdcs, mtqc;
John Fastabend8b1c0b22011-05-03 02:26:48 +00002829 u8 tcs = netdev_get_num_tc(adapter->netdev);
Alexander Duyck120ff942010-08-19 13:34:50 +00002830
2831 if (hw->mac.type == ixgbe_mac_82598EB)
2832 return;
2833
2834 /* disable the arbiter while setting MTQC */
2835 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2836 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2837 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2838
2839 /* set transmit pool layout */
Alexander Duyck671c0ad2012-05-18 06:34:02 +00002840 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2841 mtqc = IXGBE_MTQC_VT_ENA;
2842 if (tcs > 4)
2843 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
2844 else if (tcs > 1)
2845 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
2846 else if (adapter->ring_feature[RING_F_RSS].indices == 4)
2847 mtqc |= IXGBE_MTQC_32VF;
John Fastabend8b1c0b22011-05-03 02:26:48 +00002848 else
Alexander Duyck671c0ad2012-05-18 06:34:02 +00002849 mtqc |= IXGBE_MTQC_64VF;
2850 } else {
2851 if (tcs > 4)
2852 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
2853 else if (tcs > 1)
2854 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
2855 else
2856 mtqc = IXGBE_MTQC_64Q_1PB;
2857 }
John Fastabend8b1c0b22011-05-03 02:26:48 +00002858
Alexander Duyck671c0ad2012-05-18 06:34:02 +00002859 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
John Fastabend8b1c0b22011-05-03 02:26:48 +00002860
Alexander Duyck671c0ad2012-05-18 06:34:02 +00002861 /* Enable Security TX Buffer IFG for multiple pb */
2862 if (tcs) {
2863 u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
2864 sectx |= IXGBE_SECTX_DCB;
2865 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
Alexander Duyck120ff942010-08-19 13:34:50 +00002866 }
2867
2868 /* re-enable the arbiter */
2869 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2870 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2871}
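
/*
 * Illustration only, not driver code: the MTQC selection above reads as
 * a decision table keyed on SR-IOV and the traffic-class count. This
 * userspace sketch returns descriptive labels, not register values.
 * Wrapped in #if 0; extract it to run.
 */
#if 0
#include <stdio.h>
#include <stdbool.h>

static const char *mtqc_mode(bool sriov, int tcs, int rss_indices)
{
	if (sriov) {
		if (tcs > 4)
			return "VT_ENA | RT_ENA | 8TC_8TQ";
		if (tcs > 1)
			return "VT_ENA | RT_ENA | 4TC_4TQ";
		if (rss_indices == 4)
			return "VT_ENA | 32VF";
		return "VT_ENA | 64VF";
	}
	if (tcs > 4)
		return "RT_ENA | 8TC_8TQ";
	if (tcs > 1)
		return "RT_ENA | 4TC_4TQ";
	return "64Q_1PB";
}

int main(void)
{
	printf("%s\n", mtqc_mode(true, 0, 8));	/* VT_ENA | 64VF */
	printf("%s\n", mtqc_mode(false, 8, 8));	/* RT_ENA | 8TC_8TQ */
	return 0;
}
#endif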
2872
Auke Kok9a799d72007-09-15 14:07:45 -07002873/**
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002874 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
Auke Kok9a799d72007-09-15 14:07:45 -07002875 * @adapter: board private structure
2876 *
2877 * Configure the Tx unit of the MAC after a reset.
2878 **/
2879static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
2880{
Alexander Duyck2f1860b2010-08-19 13:39:43 +00002881 struct ixgbe_hw *hw = &adapter->hw;
2882 u32 dmatxctl;
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002883 u32 i;
Auke Kok9a799d72007-09-15 14:07:45 -07002884
Alexander Duyck2f1860b2010-08-19 13:39:43 +00002885 ixgbe_setup_mtqc(adapter);
2886
2887 if (hw->mac.type != ixgbe_mac_82598EB) {
2888 /* DMATXCTL.EN must be before Tx queues are enabled */
2889 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2890 dmatxctl |= IXGBE_DMATXCTL_TE;
2891 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2892 }
2893
Auke Kok9a799d72007-09-15 14:07:45 -07002894 /* Setup the HW Tx Head and Tail descriptor pointers */
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002895 for (i = 0; i < adapter->num_tx_queues; i++)
2896 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
Auke Kok9a799d72007-09-15 14:07:45 -07002897}
2898
Alexander Duyck3ebe8fd2012-04-25 04:36:38 +00002899static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
2900 struct ixgbe_ring *ring)
2901{
2902 struct ixgbe_hw *hw = &adapter->hw;
2903 u8 reg_idx = ring->reg_idx;
2904 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
2905
2906 srrctl |= IXGBE_SRRCTL_DROP_EN;
2907
2908 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
2909}
2910
2911static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
2912 struct ixgbe_ring *ring)
2913{
2914 struct ixgbe_hw *hw = &adapter->hw;
2915 u8 reg_idx = ring->reg_idx;
2916 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
2917
2918 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
2919
2920 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
2921}
2922
2923#ifdef CONFIG_IXGBE_DCB
2924void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
2925#else
2926static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
2927#endif
2928{
2929 int i;
2930 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
2931
2932 if (adapter->ixgbe_ieee_pfc)
2933 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
2934
2935 /*
2936 * We should set the drop enable bit if:
2937 * SR-IOV is enabled
2938 * or
2939 * Number of Rx queues > 1 and flow control is disabled
2940 *
2941 * This allows us to avoid head of line blocking for security
2942 * and performance reasons.
2943 */
2944 if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
2945 !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
2946 for (i = 0; i < adapter->num_rx_queues; i++)
2947 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
2948 } else {
2949 for (i = 0; i < adapter->num_rx_queues; i++)
2950 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
2951 }
2952}
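
/*
 * Illustration only, not driver code: the SRRCTL.DROP_EN policy above,
 * condensed into one predicate. Drop-on-full is wanted when SR-IOV is
 * active, or when several Rx queues run with no flow control of any kind
 * (neither link FC tx-pause nor PFC). Inputs are hypothetical. Wrapped
 * in #if 0; extract it to run.
 */
#if 0
#include <stdio.h>
#include <stdbool.h>

static bool want_rx_drop(int num_vfs, int num_rx_queues,
			 bool fc_tx_pause, bool pfc_en)
{
	return num_vfs || (num_rx_queues > 1 && !fc_tx_pause && !pfc_en);
}

int main(void)
{
	printf("%d\n", want_rx_drop(0, 8, false, false));	/* 1: drop */
	printf("%d\n", want_rx_drop(0, 8, true, false));	/* 0: FC active */
	printf("%d\n", want_rx_drop(4, 1, true, true));		/* 1: SR-IOV */
	return 0;
}
#endif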
2953
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002954#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
Auke Kok9a799d72007-09-15 14:07:45 -07002955
Yi Zoua6616b42009-08-06 13:05:23 +00002956static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
Joe Perchese8e9f692010-09-07 21:34:53 +00002957 struct ixgbe_ring *rx_ring)
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002958{
Alexander Duyck45e9baa2012-05-05 05:30:59 +00002959 struct ixgbe_hw *hw = &adapter->hw;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002960 u32 srrctl;
Alexander Duyckbf29ee62010-11-16 19:27:07 -08002961 u8 reg_idx = rx_ring->reg_idx;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002962
Alexander Duyck45e9baa2012-05-05 05:30:59 +00002963 if (hw->mac.type == ixgbe_mac_82598EB) {
2964 u16 mask = adapter->ring_feature[RING_F_RSS].mask;
2965
2966 /*
2967 * if VMDq is not active we must program one srrctl register
2968 * per RSS queue since we have enabled RDRXCTL.MVMEN
2969 */
2970 reg_idx &= mask;
Alexander Duyckbd508172010-11-16 19:27:03 -08002971 }
2972
Alexander Duyck45e9baa2012-05-05 05:30:59 +00002973 /* configure header buffer length, needed for RSC */
2974 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002975
Alexander Duyck45e9baa2012-05-05 05:30:59 +00002976 /* configure the packet buffer length */
Alexander Duyckf8003262012-03-03 02:35:52 +00002977 srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
Alexander Duyck45e9baa2012-05-05 05:30:59 +00002978
2979 /* configure descriptor type */
Alexander Duyckf8003262012-03-03 02:35:52 +00002980 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002981
Alexander Duyck45e9baa2012-05-05 05:30:59 +00002982 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002983}
2984
Alexander Duyck05abb122010-08-19 13:35:41 +00002985static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00002986{
Alexander Duyck05abb122010-08-19 13:35:41 +00002987 struct ixgbe_hw *hw = &adapter->hw;
2988 static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
Joe Perchese8e9f692010-09-07 21:34:53 +00002989 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
2990 0x6A3E67EA, 0x14364D17, 0x3BED200D};
Alexander Duyck05abb122010-08-19 13:35:41 +00002991 u32 mrqc = 0, reta = 0;
2992 u32 rxcsum;
2993 int i, j;
Alexander Duyck671c0ad2012-05-18 06:34:02 +00002994 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
John Fastabend86b4db32011-04-26 07:26:19 +00002995
Alexander Duyck671c0ad2012-05-18 06:34:02 +00002996 /*
2997 * Program table for at least 2 queues w/ SR-IOV so that VFs can
2998 * make full use of any rings they may have. We will use the
2999 * PSRTYPE register to control how many rings we use within the PF.
3000 */
3001 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2))
3002 rss_i = 2;
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00003003
Alexander Duyck05abb122010-08-19 13:35:41 +00003004 /* Fill out hash function seeds */
3005 for (i = 0; i < 10; i++)
3006 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00003007
Alexander Duyck05abb122010-08-19 13:35:41 +00003008 /* Fill out redirection table */
3009 for (i = 0, j = 0; i < 128; i++, j++) {
Alexander Duyck671c0ad2012-05-18 06:34:02 +00003010 if (j == rss_i)
Alexander Duyck05abb122010-08-19 13:35:41 +00003011 j = 0;
		/* reta is filled through a 4-byte sliding window: each
		 * byte holds one table entry, cycling 0x00, 0x11, ... up
		 * to (rss_i - 1) * 0x11 */
3014 reta = (reta << 8) | (j * 0x11);
3015 if ((i & 3) == 3)
3016 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3017 }
3018
3019 /* Disable indicating checksum in descriptor, enables RSS hash */
3020 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3021 rxcsum |= IXGBE_RXCSUM_PCSD;
3022 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3023
Alexander Duyck671c0ad2012-05-18 06:34:02 +00003024 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
Alexander Duyckfbe7ca72012-07-14 05:42:36 +00003025 if (adapter->ring_feature[RING_F_RSS].mask)
Alexander Duyck671c0ad2012-05-18 06:34:02 +00003026 mrqc = IXGBE_MRQC_RSSEN;
John Fastabend8b1c0b22011-05-03 02:26:48 +00003027 } else {
Alexander Duyck671c0ad2012-05-18 06:34:02 +00003028 u8 tcs = netdev_get_num_tc(adapter->netdev);
John Fastabend8b1c0b22011-05-03 02:26:48 +00003029
Alexander Duyck671c0ad2012-05-18 06:34:02 +00003030 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3031 if (tcs > 4)
3032 mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */
3033 else if (tcs > 1)
3034 mrqc = IXGBE_MRQC_VMDQRT4TCEN; /* 4 TCs */
3035 else if (adapter->ring_feature[RING_F_RSS].indices == 4)
3036 mrqc = IXGBE_MRQC_VMDQRSS32EN;
3037 else
3038 mrqc = IXGBE_MRQC_VMDQRSS64EN;
3039 } else {
3040 if (tcs > 4)
3041 mrqc = IXGBE_MRQC_RTRSS8TCEN;
3042 else if (tcs > 1)
John Fastabend8b1c0b22011-05-03 02:26:48 +00003043 mrqc = IXGBE_MRQC_RTRSS4TCEN;
3044 else
Alexander Duyck671c0ad2012-05-18 06:34:02 +00003045 mrqc = IXGBE_MRQC_RSSEN;
John Fastabend8b1c0b22011-05-03 02:26:48 +00003046 }
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00003047 }
3048
Alexander Duyck05abb122010-08-19 13:35:41 +00003049 /* Perform hash on these packet types */
Alexander Duyck671c0ad2012-05-18 06:34:02 +00003050 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 |
3051 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
3052 IXGBE_MRQC_RSS_FIELD_IPV6 |
3053 IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
Alexander Duyck05abb122010-08-19 13:35:41 +00003054
Alexander Duyckef6afc02012-02-08 07:51:53 +00003055 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3056 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3057 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3058 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3059
Alexander Duyck05abb122010-08-19 13:35:41 +00003060 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00003061}
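
/*
 * Illustration only, not driver code: the redirection-table fill above,
 * extracted so the sliding window is easier to see. 128 one-byte entries
 * go out four at a time through a 32-bit register; j * 0x11 simply
 * replicates the queue index into both nibbles of the byte. rss_i is a
 * made-up queue count. Wrapped in #if 0; extract it to run.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t reta = 0;
	int i, j, rss_i = 4;

	for (i = 0, j = 0; i < 128; i++, j++) {
		if (j == rss_i)
			j = 0;
		reta = (reta << 8) | (j * 0x11);
		if ((i & 3) == 3)	/* one RETA register per 4 entries */
			printf("RETA[%2d] = 0x%08x\n", i >> 2, reta);
	}
	return 0;
}
#endif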
3062
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07003063/**
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00003064 * ixgbe_configure_rscctl - enable RSC for the indicated ring
3065 * @adapter: address of board private structure
3066 * @index: index of ring to set
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00003067 **/
Don Skidmore082757a2011-07-21 05:55:00 +00003068static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
Alexander Duyck73670962010-08-19 13:38:34 +00003069 struct ixgbe_ring *ring)
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00003070{
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00003071 struct ixgbe_hw *hw = &adapter->hw;
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00003072 u32 rscctrl;
Alexander Duyckbf29ee62010-11-16 19:27:07 -08003073 u8 reg_idx = ring->reg_idx;
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00003074
Alexander Duyck7d637bc2010-11-16 19:26:56 -08003075 if (!ring_is_rsc_enabled(ring))
Alexander Duyck73670962010-08-19 13:38:34 +00003076 return;
3077
Alexander Duyck73670962010-08-19 13:38:34 +00003078 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00003079 rscctrl |= IXGBE_RSCCTL_RSCEN;
3080 /*
3081 * we must limit the number of descriptors so that the
3082 * total size of max desc * buf_len is not greater
Alexander Duyck642c6802011-11-10 09:09:17 +00003083 * than 65536
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00003084 */
Alexander Duyckf8003262012-03-03 02:35:52 +00003085 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
Alexander Duyck73670962010-08-19 13:38:34 +00003086 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00003087}
3088
Alexander Duyck9e10e042010-08-19 13:40:06 +00003089#define IXGBE_MAX_RX_DESC_POLL 10
3090static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
3091 struct ixgbe_ring *ring)
3092{
3093 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck9e10e042010-08-19 13:40:06 +00003094 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3095 u32 rxdctl;
Alexander Duyckbf29ee62010-11-16 19:27:07 -08003096 u8 reg_idx = ring->reg_idx;
Alexander Duyck9e10e042010-08-19 13:40:06 +00003097
3098 /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
3099 if (hw->mac.type == ixgbe_mac_82598EB &&
3100 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3101 return;
3102
3103 do {
Don Skidmore032b4322011-03-18 09:32:53 +00003104 usleep_range(1000, 2000);
Alexander Duyck9e10e042010-08-19 13:40:06 +00003105 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3106 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
3107
3108 if (!wait_loop) {
3109 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
3110 "the polling period\n", reg_idx);
3111 }
3112}
3113
Yi Zou2d39d572011-01-06 14:29:56 +00003114void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
3115 struct ixgbe_ring *ring)
3116{
3117 struct ixgbe_hw *hw = &adapter->hw;
3118 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3119 u32 rxdctl;
3120 u8 reg_idx = ring->reg_idx;
3121
3122 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3123 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
3124
3125 /* write value back with RXDCTL.ENABLE bit cleared */
3126 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3127
3128 if (hw->mac.type == ixgbe_mac_82598EB &&
3129 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3130 return;
3131
3132 /* the hardware may take up to 100us to really disable the rx queue */
3133 do {
3134 udelay(10);
3135 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3136 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
3137
3138 if (!wait_loop) {
3139 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
3140 "the polling period\n", reg_idx);
3141 }
3142}
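
/*
 * Illustration only, not driver code: the bounded-poll pattern shared by
 * the queue enable and disable paths above, modeled in userspace. The
 * fake register read and the ENABLE bit position are stand-ins, not the
 * real MMIO path. Wrapped in #if 0; extract it to run.
 */
#if 0
#include <stdio.h>

#define FAKE_RXDCTL_ENABLE (1u << 25)	/* assumed bit position */

static unsigned int fake_rxdctl_read(void)
{
	static int polls;
	/* pretend the hardware clears ENABLE on the third poll */
	return (++polls < 3) ? FAKE_RXDCTL_ENABLE : 0;
}

int main(void)
{
	int wait_loop = 10;		/* IXGBE_MAX_RX_DESC_POLL */
	unsigned int rxdctl;

	do {
		/* the driver sleeps here (udelay/usleep_range) */
		rxdctl = fake_rxdctl_read();
	} while (--wait_loop && (rxdctl & FAKE_RXDCTL_ENABLE));

	if (!wait_loop)
		puts("RXDCTL.ENABLE not cleared within the polling period");
	else
		puts("queue disabled");
	return 0;
}
#endif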
3143
Alexander Duyck84418e32010-08-19 13:40:54 +00003144void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
3145 struct ixgbe_ring *ring)
Alexander Duyckacd37172010-08-19 13:36:05 +00003146{
3147 struct ixgbe_hw *hw = &adapter->hw;
3148 u64 rdba = ring->dma;
Alexander Duyck9e10e042010-08-19 13:40:06 +00003149 u32 rxdctl;
Alexander Duyckbf29ee62010-11-16 19:27:07 -08003150 u8 reg_idx = ring->reg_idx;
Alexander Duyckacd37172010-08-19 13:36:05 +00003151
Alexander Duyck9e10e042010-08-19 13:40:06 +00003152 /* disable queue to avoid issues while updating state */
3153 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
Yi Zou2d39d572011-01-06 14:29:56 +00003154 ixgbe_disable_rx_queue(adapter, ring);
Alexander Duyck9e10e042010-08-19 13:40:06 +00003155
Alexander Duyckacd37172010-08-19 13:36:05 +00003156 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
3157 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
3158 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
3159 ring->count * sizeof(union ixgbe_adv_rx_desc));
3160 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
3161 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
Alexander Duyck84ea2592010-11-16 19:26:49 -08003162 ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
Alexander Duyck9e10e042010-08-19 13:40:06 +00003163
3164 ixgbe_configure_srrctl(adapter, ring);
3165 ixgbe_configure_rscctl(adapter, ring);
3166
Greg Rosee9f98072011-01-26 01:06:07 +00003167 /* If operating in IOV mode set RLPML for X540 */
3168 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
3169 hw->mac.type == ixgbe_mac_X540) {
3170 rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
3171 rxdctl |= ((ring->netdev->mtu + ETH_HLEN +
3172 ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN);
3173 }
3174
Alexander Duyck9e10e042010-08-19 13:40:06 +00003175 if (hw->mac.type == ixgbe_mac_82598EB) {
3176 /*
		 * Enable cache-line-friendly hardware writes:
		 * PTHRESH=32 descriptors (half the internal cache),
		 * which also removes the ugly rx_no_buffer_count increment;
		 * HTHRESH=4 descriptors (to minimize latency on fetch);
		 * WTHRESH=8 to burst writeback up to two cache lines.
3182 */
3183 rxdctl &= ~0x3FFFFF;
3184 rxdctl |= 0x080420;
3185 }
3186
3187 /* enable receive descriptor ring */
3188 rxdctl |= IXGBE_RXDCTL_ENABLE;
3189 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3190
3191 ixgbe_rx_desc_queue_enable(adapter, ring);
Alexander Duyck7d4987d2011-05-27 05:31:37 +00003192 ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
Alexander Duyckacd37172010-08-19 13:36:05 +00003193}
3194
Alexander Duyck48654522010-08-19 13:36:27 +00003195static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
3196{
3197 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyckfbe7ca72012-07-14 05:42:36 +00003198 int rss_i = adapter->ring_feature[RING_F_RSS].indices;
Alexander Duyck48654522010-08-19 13:36:27 +00003199 int p;
3200
3201 /* PSRTYPE must be initialized in non 82598 adapters */
3202 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
Joe Perchese8e9f692010-09-07 21:34:53 +00003203 IXGBE_PSRTYPE_UDPHDR |
3204 IXGBE_PSRTYPE_IPV4HDR |
Alexander Duyck48654522010-08-19 13:36:27 +00003205 IXGBE_PSRTYPE_L2HDR |
Joe Perchese8e9f692010-09-07 21:34:53 +00003206 IXGBE_PSRTYPE_IPV6HDR;
Alexander Duyck48654522010-08-19 13:36:27 +00003207
3208 if (hw->mac.type == ixgbe_mac_82598EB)
3209 return;
3210
Alexander Duyckfbe7ca72012-07-14 05:42:36 +00003211 if (rss_i > 3)
3212 psrtype |= 2 << 29;
3213 else if (rss_i > 1)
3214 psrtype |= 1 << 29;
Alexander Duyck48654522010-08-19 13:36:27 +00003215
3216 for (p = 0; p < adapter->num_rx_pools; p++)
Alexander Duyck1d9c0bf2012-05-05 05:32:21 +00003217 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)),
Alexander Duyck48654522010-08-19 13:36:27 +00003218 psrtype);
3219}
3220
Alexander Duyckf5b4a522010-08-19 13:38:57 +00003221static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
3222{
3223 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyckf5b4a522010-08-19 13:38:57 +00003224 u32 reg_offset, vf_shift;
Alexander Duyck435b19f2012-05-18 06:34:08 +00003225 u32 gcr_ext, vmdctl;
Greg Rosede4c7f62011-09-29 05:57:33 +00003226 int i;
Alexander Duyckf5b4a522010-08-19 13:38:57 +00003227
3228 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3229 return;
3230
3231 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
Alexander Duyck435b19f2012-05-18 06:34:08 +00003232 vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
3233 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
Alexander Duyck1d9c0bf2012-05-05 05:32:21 +00003234 vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
Alexander Duyck435b19f2012-05-18 06:34:08 +00003235 vmdctl |= IXGBE_VT_CTL_REPLEN;
3236 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
Alexander Duyckf5b4a522010-08-19 13:38:57 +00003237
Alexander Duyck1d9c0bf2012-05-05 05:32:21 +00003238 vf_shift = VMDQ_P(0) % 32;
3239 reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
Alexander Duyckf5b4a522010-08-19 13:38:57 +00003240
3241 /* Enable only the PF's pool for Tx/Rx */
Alexander Duyck435b19f2012-05-18 06:34:08 +00003242 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
3243 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
3244 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
3245 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
Alexander Duyckf5b4a522010-08-19 13:38:57 +00003246
3247 /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
Alexander Duyck1d9c0bf2012-05-05 05:32:21 +00003248 hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
Alexander Duyckf5b4a522010-08-19 13:38:57 +00003249
3250 /*
3251 * Set up VF register offsets for selected VT Mode,
3252 * i.e. 32 or 64 VFs for SR-IOV
3253 */
Alexander Duyck73079ea2012-07-14 06:48:49 +00003254 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
3255 case IXGBE_82599_VMDQ_8Q_MASK:
3256 gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
3257 break;
3258 case IXGBE_82599_VMDQ_4Q_MASK:
3259 gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
3260 break;
3261 default:
3262 gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
3263 break;
3264 }
3265
Alexander Duyckf5b4a522010-08-19 13:38:57 +00003266 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
3267
Greg Rosea985b6c32010-11-18 03:02:52 +00003269 /* Enable MAC Anti-Spoofing */
Alexander Duyck435b19f2012-05-18 06:34:08 +00003270 hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
Greg Rosea985b6c32010-11-18 03:02:52 +00003271 adapter->num_vfs);
Greg Rosede4c7f62011-09-29 05:57:33 +00003272 /* For VFs that have spoof checking turned off */
3273 for (i = 0; i < adapter->num_vfs; i++) {
3274 if (!adapter->vfinfo[i].spoofchk_enabled)
3275 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
3276 }
Alexander Duyckf5b4a522010-08-19 13:38:57 +00003277}
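
/*
 * Illustration only, not driver code: the VFRE/VFTE enable math above.
 * Pool-enable bits span two 32-bit registers, so the PF's default pool
 * picks a register half (reg_offset) and a bit position (vf_shift). The
 * pool number 40 is a made-up stand-in for VMDQ_P(0). Wrapped in #if 0;
 * extract it to run.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int pool = 40;				/* hypothetical VMDQ_P(0) */
	int vf_shift = pool % 32;		/* bit 8 ... */
	int reg_offset = (pool >= 32) ? 1 : 0;	/* ... of the high register */
	uint32_t pf_and_up = ~0u << vf_shift;
	uint32_t other_half = (uint32_t)(reg_offset - 1);

	printf("VFRE(%d) = 0x%08x\n", reg_offset, pf_and_up);	  /* 0xffffff00 */
	printf("VFRE(%d) = 0x%08x\n", reg_offset ^ 1, other_half); /* 0x00000000 */
	return 0;
}
#endif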
3278
Alexander Duyck477de6e2010-08-19 13:38:11 +00003279static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
Auke Kok9a799d72007-09-15 14:07:45 -07003280{
Auke Kok9a799d72007-09-15 14:07:45 -07003281 struct ixgbe_hw *hw = &adapter->hw;
3282 struct net_device *netdev = adapter->netdev;
3283 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
Alexander Duyck477de6e2010-08-19 13:38:11 +00003284 struct ixgbe_ring *rx_ring;
3285 int i;
3286 u32 mhadd, hlreg0;
Alexander Duyck48654522010-08-19 13:36:27 +00003287
Alexander Duyck477de6e2010-08-19 13:38:11 +00003288#ifdef IXGBE_FCOE
3289 /* adjust max frame to be able to do baby jumbo for FCoE */
3290 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
3291 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
3292 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
3293
3294#endif /* IXGBE_FCOE */
Alexander Duyck872844d2012-08-15 02:10:43 +00003295
3296 /* adjust max frame to be at least the size of a standard frame */
3297 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
3298 max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
3299
Alexander Duyck477de6e2010-08-19 13:38:11 +00003300 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3301 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
3302 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3303 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
3304
3305 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
Auke Kok9a799d72007-09-15 14:07:45 -07003306 }
3307
Auke Kok9a799d72007-09-15 14:07:45 -07003308 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
Alexander Duyck477de6e2010-08-19 13:38:11 +00003309 /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
3310 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
Auke Kok9a799d72007-09-15 14:07:45 -07003311 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3312
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00003313 /*
3314 * Setup the HW Rx Head and Tail Descriptor Pointers and
3315 * the Base and Length of the Rx Descriptor Ring
3316 */
Auke Kok9a799d72007-09-15 14:07:45 -07003317 for (i = 0; i < adapter->num_rx_queues; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00003318 rx_ring = adapter->rx_ring[i];
Alexander Duyck7d637bc2010-11-16 19:26:56 -08003319 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
3320 set_ring_rsc_enabled(rx_ring);
3321 else
3322 clear_ring_rsc_enabled(rx_ring);
Alexander Duyck477de6e2010-08-19 13:38:11 +00003323 }
Alexander Duyck477de6e2010-08-19 13:38:11 +00003324}
3325
Alexander Duyck73670962010-08-19 13:38:34 +00003326static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
3327{
3328 struct ixgbe_hw *hw = &adapter->hw;
3329 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3330
3331 switch (hw->mac.type) {
3332 case ixgbe_mac_82598EB:
3333 /*
3334 * For VMDq support of different descriptor types or
3335 * buffer sizes through the use of multiple SRRCTL
3336 * registers, RDRXCTL.MVMEN must be set to 1
3337 *
3338 * also, the manual doesn't mention it clearly but DCA hints
3339 * will only use queue 0's tags unless this bit is set. Side
3340 * effects of setting this bit are only that SRRCTL must be
3341 * fully programmed [0..15]
3342 */
3343 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
3344 break;
3345 case ixgbe_mac_82599EB:
Don Skidmoreb93a2222010-11-16 19:27:17 -08003346 case ixgbe_mac_X540:
Alexander Duyck73670962010-08-19 13:38:34 +00003347 /* Disable RSC for ACK packets */
3348 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
3349 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
3350 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3351 /* hardware requires some bits to be set by default */
3352 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
3353 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
3354 break;
3355 default:
3356 /* We should do nothing since we don't know this hardware */
3357 return;
3358 }
3359
3360 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
3361}
3362
Alexander Duyck477de6e2010-08-19 13:38:11 +00003363/**
3364 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
3365 * @adapter: board private structure
3366 *
3367 * Configure the Rx unit of the MAC after a reset.
3368 **/
3369static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
3370{
3371 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck477de6e2010-08-19 13:38:11 +00003372 int i;
3373 u32 rxctrl;
Alexander Duyck477de6e2010-08-19 13:38:11 +00003374
3375 /* disable receives while setting up the descriptors */
3376 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3377 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
3378
3379 ixgbe_setup_psrtype(adapter);
Alexander Duyck73670962010-08-19 13:38:34 +00003380 ixgbe_setup_rdrxctl(adapter);
Alexander Duyck477de6e2010-08-19 13:38:11 +00003381
Alexander Duyck9e10e042010-08-19 13:40:06 +00003382 /* Program registers for the distribution of queues */
Alexander Duyckf5b4a522010-08-19 13:38:57 +00003383 ixgbe_setup_mrqc(adapter);
Alexander Duyckf5b4a522010-08-19 13:38:57 +00003384
Alexander Duyck477de6e2010-08-19 13:38:11 +00003385 /* set_rx_buffer_len must be called before ring initialization */
3386 ixgbe_set_rx_buffer_len(adapter);
3387
3388 /*
3389 * Setup the HW Rx Head and Tail Descriptor Pointers and
3390 * the Base and Length of the Rx Descriptor Ring
3391 */
Alexander Duyck9e10e042010-08-19 13:40:06 +00003392 for (i = 0; i < adapter->num_rx_queues; i++)
3393 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07003394
Alexander Duyck9e10e042010-08-19 13:40:06 +00003395 /* disable drop enable for 82598 parts */
3396 if (hw->mac.type == ixgbe_mac_82598EB)
3397 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3398
3399 /* enable all receives */
3400 rxctrl |= IXGBE_RXCTRL_RXEN;
3401 hw->mac.ops.enable_rx_dma(hw, rxctrl);
Auke Kok9a799d72007-09-15 14:07:45 -07003402}
3403
Jiri Pirko8e586132011-12-08 19:52:37 -05003404static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
Auke Kok9a799d72007-09-15 14:07:45 -07003405{
3406 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07003407 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07003408
3409 /* add VID to filter table */
Alexander Duyck1d9c0bf2012-05-05 05:32:21 +00003410 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true);
Jesse Grossf62bbb52010-10-20 13:56:10 +00003411 set_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05003412
3413 return 0;
Auke Kok9a799d72007-09-15 14:07:45 -07003414}
3415
Jiri Pirko8e586132011-12-08 19:52:37 -05003416static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
Auke Kok9a799d72007-09-15 14:07:45 -07003417{
3418 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07003419 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07003420
Auke Kok9a799d72007-09-15 14:07:45 -07003421 /* remove VID from filter table */
Alexander Duyck1d9c0bf2012-05-05 05:32:21 +00003422 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false);
Jesse Grossf62bbb52010-10-20 13:56:10 +00003423 clear_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05003424
3425 return 0;
Auke Kok9a799d72007-09-15 14:07:45 -07003426}
3427
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003428/**
3429 * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
3430 * @adapter: driver data
3431 */
3432static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
3433{
3434 struct ixgbe_hw *hw = &adapter->hw;
Jesse Grossf62bbb52010-10-20 13:56:10 +00003435 u32 vlnctrl;
3436
3437 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3438 vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
3439 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3440}
3441
3442/**
3443 * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
3444 * @adapter: driver data
3445 */
3446static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
3447{
3448 struct ixgbe_hw *hw = &adapter->hw;
3449 u32 vlnctrl;
3450
3451 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3452 vlnctrl |= IXGBE_VLNCTRL_VFE;
3453 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
3454 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3455}
3456
3457/**
3458 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
3459 * @adapter: driver data
3460 */
3461static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
3462{
3463 struct ixgbe_hw *hw = &adapter->hw;
3464 u32 vlnctrl;
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003465 int i, j;
3466
3467 switch (hw->mac.type) {
3468 case ixgbe_mac_82598EB:
Jesse Grossf62bbb52010-10-20 13:56:10 +00003469 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3470 vlnctrl &= ~IXGBE_VLNCTRL_VME;
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003471 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3472 break;
3473 case ixgbe_mac_82599EB:
Don Skidmoreb93a2222010-11-16 19:27:17 -08003474 case ixgbe_mac_X540:
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003475 for (i = 0; i < adapter->num_rx_queues; i++) {
3476 j = adapter->rx_ring[i]->reg_idx;
3477 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3478 vlnctrl &= ~IXGBE_RXDCTL_VME;
3479 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3480 }
3481 break;
3482 default:
3483 break;
3484 }
3485}
3486
3487/**
Jesse Grossf62bbb52010-10-20 13:56:10 +00003488 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003489 * @adapter: driver data
3490 */
Jesse Grossf62bbb52010-10-20 13:56:10 +00003491static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003492{
3493 struct ixgbe_hw *hw = &adapter->hw;
Jesse Grossf62bbb52010-10-20 13:56:10 +00003494 u32 vlnctrl;
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003495 int i, j;
3496
3497 switch (hw->mac.type) {
3498 case ixgbe_mac_82598EB:
Jesse Grossf62bbb52010-10-20 13:56:10 +00003499 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3500 vlnctrl |= IXGBE_VLNCTRL_VME;
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003501 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3502 break;
3503 case ixgbe_mac_82599EB:
Don Skidmoreb93a2222010-11-16 19:27:17 -08003504 case ixgbe_mac_X540:
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003505 for (i = 0; i < adapter->num_rx_queues; i++) {
3506 j = adapter->rx_ring[i]->reg_idx;
3507 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3508 vlnctrl |= IXGBE_RXDCTL_VME;
3509 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3510 }
3511 break;
3512 default:
3513 break;
3514 }
3515}
3516
Auke Kok9a799d72007-09-15 14:07:45 -07003517static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
3518{
Jesse Grossf62bbb52010-10-20 13:56:10 +00003519 u16 vid;
Auke Kok9a799d72007-09-15 14:07:45 -07003520
Jesse Grossf62bbb52010-10-20 13:56:10 +00003521 ixgbe_vlan_rx_add_vid(adapter->netdev, 0);
3522
3523 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
3524 ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
Auke Kok9a799d72007-09-15 14:07:45 -07003525}
3526
3527/**
Alexander Duyck28500622010-06-15 09:25:48 +00003528 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
3529 * @netdev: network interface device structure
3530 *
3531 * Writes unicast address list to the RAR table.
3532 * Returns: -ENOMEM on failure/insufficient address space
3533 * 0 on no addresses written
3534 * X on writing X addresses to the RAR table
3535 **/
3536static int ixgbe_write_uc_addr_list(struct net_device *netdev)
3537{
3538 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3539 struct ixgbe_hw *hw = &adapter->hw;
John Fastabend95447462012-05-31 12:42:26 +00003540 unsigned int rar_entries = hw->mac.num_rar_entries - 1;
Alexander Duyck28500622010-06-15 09:25:48 +00003541 int count = 0;
3542
John Fastabend95447462012-05-31 12:42:26 +00003543 /* In SR-IOV mode significantly less RAR entries are available */
3544 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3545 rar_entries = IXGBE_MAX_PF_MACVLANS - 1;
3546
Alexander Duyck28500622010-06-15 09:25:48 +00003547 /* return ENOMEM indicating insufficient memory for addresses */
3548 if (netdev_uc_count(netdev) > rar_entries)
3549 return -ENOMEM;
3550
John Fastabend95447462012-05-31 12:42:26 +00003551 if (!netdev_uc_empty(netdev)) {
Alexander Duyck28500622010-06-15 09:25:48 +00003552 struct netdev_hw_addr *ha;
3553 /* return error if we do not support writing to RAR table */
3554 if (!hw->mac.ops.set_rar)
3555 return -ENOMEM;
3556
3557 netdev_for_each_uc_addr(ha, netdev) {
3558 if (!rar_entries)
3559 break;
3560 hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
Alexander Duyck1d9c0bf2012-05-05 05:32:21 +00003561 VMDQ_P(0), IXGBE_RAH_AV);
Alexander Duyck28500622010-06-15 09:25:48 +00003562 count++;
3563 }
3564 }
3565 /* write the addresses in reverse order to avoid write combining */
3566 for (; rar_entries > 0 ; rar_entries--)
3567 hw->mac.ops.clear_rar(hw, rar_entries);
3568
3569 return count;
3570}
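
/*
 * Illustration only, not driver code: the return-value contract of
 * ixgbe_write_uc_addr_list above, reduced to its accounting. With N
 * usable RAR slots, a unicast list longer than N yields -ENOMEM (the
 * caller then falls back to unicast promiscuous mode); otherwise the
 * count of addresses written comes back. Numbers are hypothetical.
 * Wrapped in #if 0; extract it to run.
 */
#if 0
#include <stdio.h>
#include <errno.h>

static int write_uc_addr_list(int uc_count, int rar_entries)
{
	if (uc_count > rar_entries)
		return -ENOMEM;
	return uc_count;
}

int main(void)
{
	printf("%d\n", write_uc_addr_list(5, 15));	/* 5 written */
	printf("%d\n", write_uc_addr_list(20, 15));	/* -ENOMEM */
	return 0;
}
#endif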
3571
3572/**
Christopher Leech2c5645c2008-08-26 04:27:02 -07003573 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
Auke Kok9a799d72007-09-15 14:07:45 -07003574 * @netdev: network interface device structure
3575 *
Christopher Leech2c5645c2008-08-26 04:27:02 -07003576 * The set_rx_method entry point is called whenever the unicast/multicast
3577 * address list or the network interface flags are updated. This routine is
3578 * responsible for configuring the hardware for proper unicast, multicast and
3579 * promiscuous mode.
Auke Kok9a799d72007-09-15 14:07:45 -07003580 **/
Greg Rose7f870472010-01-09 02:25:29 +00003581void ixgbe_set_rx_mode(struct net_device *netdev)
Auke Kok9a799d72007-09-15 14:07:45 -07003582{
3583 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3584 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck28500622010-06-15 09:25:48 +00003585 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
3586 int count;
Auke Kok9a799d72007-09-15 14:07:45 -07003587
3588 /* Check for Promiscuous and All Multicast modes */
3589
3590 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3591
Alexander Duyckf5dc4422010-08-19 13:36:49 +00003592 /* set all bits that we expect to always be set */
Ben Greear3f2d1c02012-03-08 08:28:41 +00003593 fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
Alexander Duyckf5dc4422010-08-19 13:36:49 +00003594 fctrl |= IXGBE_FCTRL_BAM;
3595 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
3596 fctrl |= IXGBE_FCTRL_PMCF;
3597
Alexander Duyck28500622010-06-15 09:25:48 +00003598 /* clear the bits we are changing the status of */
3599 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3600
Auke Kok9a799d72007-09-15 14:07:45 -07003601 if (netdev->flags & IFF_PROMISC) {
Emil Tantilove433ea12010-05-13 17:33:00 +00003602 hw->addr_ctrl.user_set_promisc = true;
Auke Kok9a799d72007-09-15 14:07:45 -07003603 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
Alexander Duyck28500622010-06-15 09:25:48 +00003604 vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003605 /* don't hardware filter vlans in promisc mode */
3606 ixgbe_vlan_filter_disable(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003607 } else {
Patrick McHardy746b9f02008-07-16 20:15:45 -07003608 if (netdev->flags & IFF_ALLMULTI) {
3609 fctrl |= IXGBE_FCTRL_MPE;
Alexander Duyck28500622010-06-15 09:25:48 +00003610 vmolr |= IXGBE_VMOLR_MPE;
3611 } else {
3612 /*
			 * Write addresses to the MTA; if the attempt fails,
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
3616 */
3617 hw->mac.ops.update_mc_addr_list(hw, netdev);
3618 vmolr |= IXGBE_VMOLR_ROMPE;
Patrick McHardy746b9f02008-07-16 20:15:45 -07003619 }
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003620 ixgbe_vlan_filter_enable(adapter);
Emil Tantilove433ea12010-05-13 17:33:00 +00003621 hw->addr_ctrl.user_set_promisc = false;
John Fastabend9dcb3732012-04-15 06:44:25 +00003622 }
3623
3624 /*
3625 * Write addresses to available RAR registers, if there is not
3626 * sufficient space to store all the addresses then enable
3627 * unicast promiscuous mode
3628 */
3629 count = ixgbe_write_uc_addr_list(netdev);
3630 if (count < 0) {
3631 fctrl |= IXGBE_FCTRL_UPE;
3632 vmolr |= IXGBE_VMOLR_ROPE;
Alexander Duyck28500622010-06-15 09:25:48 +00003633 }
3634
Alexander Duyck1d9c0bf2012-05-05 05:32:21 +00003635 if (adapter->num_vfs)
Alexander Duyck28500622010-06-15 09:25:48 +00003636 ixgbe_restore_vf_multicasts(adapter);
Alexander Duyck1d9c0bf2012-05-05 05:32:21 +00003637
3638 if (hw->mac.type != ixgbe_mac_82598EB) {
3639 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
Alexander Duyck28500622010-06-15 09:25:48 +00003640 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
3641 IXGBE_VMOLR_ROPE);
Alexander Duyck1d9c0bf2012-05-05 05:32:21 +00003642 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
Auke Kok9a799d72007-09-15 14:07:45 -07003643 }
3644
Ben Greear3f2d1c02012-03-08 08:28:41 +00003645 /* This is useful for sniffing bad packets. */
3646 if (adapter->netdev->features & NETIF_F_RXALL) {
3647 /* UPE and MPE will be handled by normal PROMISC logic
		 * in ixgbe_set_rx_mode */
3649 fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */
3650 IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */
3651 IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */
3652
3653 fctrl &= ~(IXGBE_FCTRL_DPF);
3654 /* NOTE: VLAN filtering is disabled by setting PROMISC */
3655 }
3656
Auke Kok9a799d72007-09-15 14:07:45 -07003657 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
Jesse Grossf62bbb52010-10-20 13:56:10 +00003658
3659 if (netdev->features & NETIF_F_HW_VLAN_RX)
3660 ixgbe_vlan_strip_enable(adapter);
3661 else
3662 ixgbe_vlan_strip_disable(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003663}
3664
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003665static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
3666{
3667 int q_idx;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003668
Alexander Duyck49c7ffb2012-05-05 05:30:43 +00003669 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
3670 napi_enable(&adapter->q_vector[q_idx]->napi);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003671}
3672
3673static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
3674{
3675 int q_idx;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003676
Alexander Duyck49c7ffb2012-05-05 05:30:43 +00003677 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
3678 napi_disable(&adapter->q_vector[q_idx]->napi);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003679}
3680
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08003681#ifdef CONFIG_IXGBE_DCB
Ben Hutchings49ce9c22012-07-10 10:56:00 +00003682/**
Alexander Duyck2f90b862008-11-20 20:52:10 -08003683 * ixgbe_configure_dcb - Configure DCB hardware
3684 * @adapter: ixgbe adapter struct
3685 *
3686 * This is called by the driver on open to configure the DCB hardware.
3687 * This is also called by the gennetlink interface when reconfiguring
3688 * the DCB state.
3689 */
3690static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3691{
3692 struct ixgbe_hw *hw = &adapter->hw;
John Fastabend98063072010-10-28 00:59:57 +00003693 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
Alexander Duyck2f90b862008-11-20 20:52:10 -08003694
Alexander Duyck67ebd792010-08-19 13:34:04 +00003695 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
3696 if (hw->mac.type == ixgbe_mac_82598EB)
3697 netif_set_gso_max_size(adapter->netdev, 65536);
3698 return;
3699 }
3700
3701 if (hw->mac.type == ixgbe_mac_82598EB)
3702 netif_set_gso_max_size(adapter->netdev, 32768);
3703
John Fastabendb1208182011-10-15 05:00:10 +00003704#ifdef IXGBE_FCOE
3705 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
3706 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
3707#endif
3708
Alexander Duyck01fa7d92010-11-16 19:26:53 -08003709 /* reconfigure the hardware */
John Fastabend6f70f6a2011-04-26 07:26:25 +00003710 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
John Fastabendc27931d2011-02-23 05:58:25 +00003711 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3712 DCB_TX_CONFIG);
3713 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3714 DCB_RX_CONFIG);
3715 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
John Fastabendb1208182011-10-15 05:00:10 +00003716 } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
3717 ixgbe_dcb_hw_ets(&adapter->hw,
3718 adapter->ixgbe_ieee_ets,
3719 max_frame);
3720 ixgbe_dcb_hw_pfc_config(&adapter->hw,
3721 adapter->ixgbe_ieee_pfc->pfc_en,
3722 adapter->ixgbe_ieee_ets->prio_tc);
John Fastabendc27931d2011-02-23 05:58:25 +00003723 }
John Fastabend8187cd42011-02-23 05:58:08 +00003724
3725 /* Enable RSS Hash per TC */
3726 if (hw->mac.type != ixgbe_mac_82598EB) {
Alexander Duyck4ae63732012-06-22 06:46:33 +00003727 u32 msb = 0;
3728 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
John Fastabend8187cd42011-02-23 05:58:08 +00003729
Alexander Duyckd411a932012-06-30 00:14:01 +00003730 while (rss_i) {
3731 msb++;
3732 rss_i >>= 1;
John Fastabend8187cd42011-02-23 05:58:08 +00003733 }
Alexander Duyckd411a932012-06-30 00:14:01 +00003734
Alexander Duyck4ae63732012-06-22 06:46:33 +00003735 /* write msb to all 8 TCs in one write */
3736 IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
John Fastabend8187cd42011-02-23 05:58:08 +00003737 }
Alexander Duyck2f90b862008-11-20 20:52:10 -08003738}
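/*
 * Illustrative aside (a sketch, not part of the driver): the 'msb' loop
 * above is equivalent to fls() from <linux/bitops.h>, i.e.
 *
 *	msb = fls(adapter->ring_feature[RING_F_RSS].indices - 1);
 *
 * fls() returns the index of the highest set bit (0 for an input of 0),
 * which is the number of bits needed to address every RSS queue. The
 * 0x11111111 multiply then replicates that value into all 8 per-TC
 * nibbles of RQTC with a single register write.
 */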
John Fastabend9da712d2011-08-23 03:14:22 +00003739#endif
3740
3741/* Additional bit time to account for IXGBE framing */
3742#define IXGBE_ETH_FRAMING 20
3743
Ben Hutchings49ce9c22012-07-10 10:56:00 +00003744/**
John Fastabend9da712d2011-08-23 03:14:22 +00003745 * ixgbe_hpbthresh - calculate high water mark for flow control
3746 *
3747 * @adapter: board private structure to calculate for
Ben Hutchings49ce9c22012-07-10 10:56:00 +00003748 * @pb: packet buffer to calculate
John Fastabend9da712d2011-08-23 03:14:22 +00003749 */
3750static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
3751{
3752 struct ixgbe_hw *hw = &adapter->hw;
3753 struct net_device *dev = adapter->netdev;
3754 int link, tc, kb, marker;
3755 u32 dv_id, rx_pba;
3756
3757 /* Calculate max LAN frame size */
3758 tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
3759
3760#ifdef IXGBE_FCOE
3761 /* FCoE traffic class uses FCOE jumbo frames */
Alexander Duyck800bd602012-06-02 00:11:02 +00003762 if ((dev->features & NETIF_F_FCOE_MTU) &&
3763 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
3764 (pb == ixgbe_fcoe_get_tc(adapter)))
3765 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
Alexander Duyck2f90b862008-11-20 20:52:10 -08003766
3767#endif
John Fastabend9da712d2011-08-23 03:14:22 +00003768 /* Calculate delay value for device */
3769 switch (hw->mac.type) {
3770 case ixgbe_mac_X540:
3771 dv_id = IXGBE_DV_X540(link, tc);
3772 break;
3773 default:
3774 dv_id = IXGBE_DV(link, tc);
3775 break;
3776 }
3777
3778 /* Loopback switch introduces additional latency */
3779 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3780 dv_id += IXGBE_B2BT(tc);
3781
3782	/* Delay value is calculated in bit times; convert it to KB */
3783 kb = IXGBE_BT2KB(dv_id);
3784 rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
3785
3786 marker = rx_pba - kb;
3787
3788	/* It is possible that the packet buffer is not large enough
3789	 * to provide the required headroom. In this case warn the
3790	 * user and do the best we can.
3791	 */
3792	if (marker < 0) {
3793		e_warn(drv, "Packet Buffer(%i) cannot provide enough "
3794		       "headroom to support flow control. "
3795		       "Decrease MTU or number of traffic classes\n", pb);
3796 marker = tc + 1;
3797 }
3798
3799 return marker;
3800}
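/*
 * Worked example (illustrative; the exact IXGBE_DV()/IXGBE_BT2KB()
 * coefficients live in ixgbe_type.h): with a 1500 byte MTU the frame
 * size used above is 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) +
 * IXGBE_ETH_FRAMING (20) = 1538 bytes. That feeds the device delay
 * value dv_id (in bit times), IXGBE_BT2KB() converts it to kilobytes,
 * and the high water mark is whatever headroom remains in this TC's
 * packet buffer: marker = rx_pba - kb.
 */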
3801
Ben Hutchings49ce9c22012-07-10 10:56:00 +00003802/**
John Fastabend9da712d2011-08-23 03:14:22 +00003803 * ixgbe_lpbthresh - calculate low water mark for flow control
3804 *
3805 * @adapter: board private structure to calculate for
John Fastabend9da712d2011-08-23 03:14:22 +00003807 */
3808static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
3809{
3810 struct ixgbe_hw *hw = &adapter->hw;
3811 struct net_device *dev = adapter->netdev;
3812 int tc;
3813 u32 dv_id;
3814
3815 /* Calculate max LAN frame size */
3816 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
3817
3818 /* Calculate delay value for device */
3819 switch (hw->mac.type) {
3820 case ixgbe_mac_X540:
3821 dv_id = IXGBE_LOW_DV_X540(tc);
3822 break;
3823 default:
3824 dv_id = IXGBE_LOW_DV(tc);
3825 break;
3826 }
3827
3828	/* Delay value is calculated in bit times; convert it to KB */
3829 return IXGBE_BT2KB(dv_id);
3830}
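/*
 * Unlike the high water mark above, this value is independent of the
 * packet buffer size, so nothing guarantees it lands below the per-TC
 * high water marks; ixgbe_pbthresh_setup() below sanity-checks the pair.
 */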
3831
3832/*
3833 * ixgbe_pbthresh_setup - calculate and set up high and low water marks
3834 */
3835static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
3836{
3837 struct ixgbe_hw *hw = &adapter->hw;
3838 int num_tc = netdev_get_num_tc(adapter->netdev);
3839 int i;
3840
3841 if (!num_tc)
3842 num_tc = 1;
3843
3844 hw->fc.low_water = ixgbe_lpbthresh(adapter);
3845
3846 for (i = 0; i < num_tc; i++) {
3847 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
3848
3849 /* Low water marks must not be larger than high water marks */
3850 if (hw->fc.low_water > hw->fc.high_water[i])
3851 hw->fc.low_water = 0;
3852 }
3853}
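/*
 * These software thresholds are consumed later by the MAC's fc_enable
 * routine (hw->mac.ops.fc_enable), which programs them into the device's
 * flow control threshold registers (FCRTL/FCRTH) when flow control is
 * actually enabled.
 */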
John Fastabend80605c652011-05-02 12:34:10 +00003854
3855static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
3856{
John Fastabend80605c652011-05-02 12:34:10 +00003857 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyckf7e10272011-07-21 00:40:35 +00003858 int hdrm;
3859 u8 tc = netdev_get_num_tc(adapter->netdev);
John Fastabend80605c652011-05-02 12:34:10 +00003860
3861 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
3862 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
Alexander Duyckf7e10272011-07-21 00:40:35 +00003863 hdrm = 32 << adapter->fdir_pballoc;
3864 else
3865 hdrm = 0;
John Fastabend80605c652011-05-02 12:34:10 +00003866
Alexander Duyckf7e10272011-07-21 00:40:35 +00003867 hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
John Fastabend9da712d2011-08-23 03:14:22 +00003868 ixgbe_pbthresh_setup(adapter);
John Fastabend80605c652011-05-02 12:34:10 +00003869}
3870
Alexander Duycke4911d52011-05-11 07:18:52 +00003871static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
3872{
3873 struct ixgbe_hw *hw = &adapter->hw;
3874 struct hlist_node *node, *node2;
3875 struct ixgbe_fdir_filter *filter;
3876
3877 spin_lock(&adapter->fdir_perfect_lock);
3878
3879 if (!hlist_empty(&adapter->fdir_filter_list))
3880 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
3881
3882 hlist_for_each_entry_safe(filter, node, node2,
3883 &adapter->fdir_filter_list, fdir_node) {
3884 ixgbe_fdir_write_perfect_filter_82599(hw,
Alexander Duyck1f4d5182011-05-14 01:16:02 +00003885 &filter->filter,
3886 filter->sw_idx,
3887 (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
3888 IXGBE_FDIR_DROP_QUEUE :
3889 adapter->rx_ring[filter->action]->reg_idx);
Alexander Duycke4911d52011-05-11 07:18:52 +00003890 }
3891
3892 spin_unlock(&adapter->fdir_perfect_lock);
3893}
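/*
 * Note: all 82599 perfect filters share a single global input mask,
 * which is why the mask is programmed once above before the individual
 * filters are written back.
 */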
3894
Auke Kok9a799d72007-09-15 14:07:45 -07003895static void ixgbe_configure(struct ixgbe_adapter *adapter)
3896{
Atita Shirwaikard2f5e7f2012-02-18 02:58:58 +00003897 struct ixgbe_hw *hw = &adapter->hw;
3898
John Fastabend80605c652011-05-02 12:34:10 +00003899 ixgbe_configure_pb(adapter);
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08003900#ifdef CONFIG_IXGBE_DCB
Alexander Duyck67ebd792010-08-19 13:34:04 +00003901 ixgbe_configure_dcb(adapter);
Alexander Duyck2f90b862008-11-20 20:52:10 -08003902#endif
Alexander Duyckb35d4d42012-05-23 05:39:25 +00003903 /*
3904 * We must restore virtualization before VLANs or else
3905 * the VLVF registers will not be populated
3906 */
3907 ixgbe_configure_virtualization(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003908
Alexander Duyck4c1d7b42011-07-21 00:40:30 +00003909 ixgbe_set_rx_mode(adapter->netdev);
Jesse Grossf62bbb52010-10-20 13:56:10 +00003910 ixgbe_restore_vlan(adapter);
3911
Atita Shirwaikard2f5e7f2012-02-18 02:58:58 +00003912 switch (hw->mac.type) {
3913 case ixgbe_mac_82599EB:
3914 case ixgbe_mac_X540:
3915 hw->mac.ops.disable_rx_buff(hw);
3916 break;
3917 default:
3918 break;
3919 }
3920
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003921 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
Alexander Duyck4c1d7b42011-07-21 00:40:30 +00003922 ixgbe_init_fdir_signature_82599(&adapter->hw,
3923 adapter->fdir_pballoc);
Alexander Duycke4911d52011-05-11 07:18:52 +00003924 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
3925 ixgbe_init_fdir_perfect_82599(&adapter->hw,
3926 adapter->fdir_pballoc);
3927 ixgbe_fdir_filter_restore(adapter);
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003928 }
Alexander Duyck4c1d7b42011-07-21 00:40:30 +00003929
Atita Shirwaikard2f5e7f2012-02-18 02:58:58 +00003930 switch (hw->mac.type) {
3931 case ixgbe_mac_82599EB:
3932 case ixgbe_mac_X540:
3933 hw->mac.ops.enable_rx_buff(hw);
3934 break;
3935 default:
3936 break;
3937 }
3938
Alexander Duyck7c8ae652012-05-05 05:32:47 +00003939#ifdef IXGBE_FCOE
3940 /* configure FCoE L2 filters, redirection table, and Rx control */
3941 ixgbe_configure_fcoe(adapter);
3942
3943#endif /* IXGBE_FCOE */
Auke Kok9a799d72007-09-15 14:07:45 -07003944 ixgbe_configure_tx(adapter);
3945 ixgbe_configure_rx(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003946}
3947
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003948static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
3949{
3950 switch (hw->phy.type) {
3951 case ixgbe_phy_sfp_avago:
3952 case ixgbe_phy_sfp_ftl:
3953 case ixgbe_phy_sfp_intel:
3954 case ixgbe_phy_sfp_unknown:
Don Skidmoreea0a04d2010-05-18 16:00:13 +00003955 case ixgbe_phy_sfp_passive_tyco:
3956 case ixgbe_phy_sfp_passive_unknown:
3957 case ixgbe_phy_sfp_active_unknown:
3958 case ixgbe_phy_sfp_ftl_active:
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003959 return true;
Alexander Duyck8917b442011-07-21 00:40:51 +00003960 case ixgbe_phy_nl:
3961 if (hw->mac.type == ixgbe_mac_82598EB)
3962 return true;
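		/* fall through */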
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003963 default:
3964 return false;
3965 }
3966}
3967
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08003968/**
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003969 * ixgbe_sfp_link_config - set up SFP+ link
3970 * @adapter: pointer to private adapter struct
3971 **/
3972static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
3973{
Alexander Duyck70864002011-04-27 09:13:56 +00003974 /*
Stephen Hemminger52f33af2011-12-22 16:34:52 +00003975 * We are assuming the worst case scenario here, and that
Alexander Duyck70864002011-04-27 09:13:56 +00003976 * is that an SFP was inserted/removed after the reset
3977 * but before SFP detection was enabled. As such the best
3978	 * solution is to just start searching as soon as we start up.
3979 */
3980 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3981 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003982
Alexander Duyck70864002011-04-27 09:13:56 +00003983 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003984}
3985
3986/**
3987 * ixgbe_non_sfp_link_config - set up non-SFP+ link
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08003988 * @hw: pointer to private hardware struct
3989 *
3990 * Returns 0 on success, negative on failure
3991 **/
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003992static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08003993{
3994 u32 autoneg;
Mallikarjuna R Chilakala8620a102009-09-01 13:49:35 +00003995 bool negotiation, link_up = false;
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08003996 u32 ret = IXGBE_ERR_LINK_SETUP;
3997
3998 if (hw->mac.ops.check_link)
3999 ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
4000
4001 if (ret)
4002 goto link_cfg_out;
4003
Emil Tantilov0b0c2b32011-02-26 06:40:16 +00004004 autoneg = hw->phy.autoneg_advertised;
4005 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
Joe Perchese8e9f692010-09-07 21:34:53 +00004006 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
4007 &negotiation);
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08004008 if (ret)
4009 goto link_cfg_out;
4010
Mallikarjuna R Chilakala8620a102009-09-01 13:49:35 +00004011 if (hw->mac.ops.setup_link)
4012 ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08004013link_cfg_out:
4014 return ret;
4015}
4016
Alexander Duycka34bcff2010-08-19 13:39:20 +00004017static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
Auke Kok9a799d72007-09-15 14:07:45 -07004018{
Auke Kok9a799d72007-09-15 14:07:45 -07004019 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duycka34bcff2010-08-19 13:39:20 +00004020 u32 gpie = 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004021
Jesse Brandeburg9b471442009-12-03 11:33:54 +00004022 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
Alexander Duycka34bcff2010-08-19 13:39:20 +00004023 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
4024 IXGBE_GPIE_OCD;
4025 gpie |= IXGBE_GPIE_EIAME;
Jesse Brandeburg9b471442009-12-03 11:33:54 +00004026 /*
4027 * use EIAM to auto-mask when MSI-X interrupt is asserted
4028 * this saves a register write for every interrupt
4029 */
4030 switch (hw->mac.type) {
4031 case ixgbe_mac_82598EB:
4032 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4033 break;
Jesse Brandeburg9b471442009-12-03 11:33:54 +00004034 case ixgbe_mac_82599EB:
Don Skidmoreb93a2222010-11-16 19:27:17 -08004035 case ixgbe_mac_X540:
4036 default:
Jesse Brandeburg9b471442009-12-03 11:33:54 +00004037 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4038 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4039 break;
4040 }
4041 } else {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004042 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
4043 * specifically only auto mask tx and rx interrupts */
4044 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
Auke Kok9a799d72007-09-15 14:07:45 -07004045 }
4046
Alexander Duycka34bcff2010-08-19 13:39:20 +00004047 /* XXX: to interrupt immediately for EICS writes, enable this */
4048 /* gpie |= IXGBE_GPIE_EIMEN; */
4049
4050 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
4051 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
Alexander Duyck73079ea2012-07-14 06:48:49 +00004052
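		/* VT mode trades pools for queues: with 8 queues per pool
		 * the device runs 16 pools, with 4 queues per pool 32 pools,
		 * and otherwise 64 pools of 2 queues each.
		 */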
4053 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
4054 case IXGBE_82599_VMDQ_8Q_MASK:
4055 gpie |= IXGBE_GPIE_VTMODE_16;
4056 break;
4057 case IXGBE_82599_VMDQ_4Q_MASK:
4058 gpie |= IXGBE_GPIE_VTMODE_32;
4059 break;
4060 default:
4061 gpie |= IXGBE_GPIE_VTMODE_64;
4062 break;
4063 }
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07004064 }
4065
Alexander Duyck5fdd31f2011-07-21 00:40:45 +00004066 /* Enable Thermal over heat sensor interrupt */
Don Skidmoref3df98e2011-08-17 10:15:21 +00004067 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
4068 switch (adapter->hw.mac.type) {
4069 case ixgbe_mac_82599EB:
4070 gpie |= IXGBE_SDP0_GPIEN;
4071 break;
4072 case ixgbe_mac_X540:
4073 gpie |= IXGBE_EIMS_TS;
4074 break;
4075 default:
4076 break;
4077 }
4078 }
Alexander Duyck5fdd31f2011-07-21 00:40:45 +00004079
Alexander Duycka34bcff2010-08-19 13:39:20 +00004080 /* Enable fan failure interrupt */
4081 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
Jesse Brandeburg0befdb32008-10-31 00:46:40 -07004082 gpie |= IXGBE_SDP1_GPIEN;
Jesse Brandeburg0befdb32008-10-31 00:46:40 -07004083
Don Skidmore2698b202011-04-13 07:01:52 +00004084 if (hw->mac.type == ixgbe_mac_82599EB) {
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004085 gpie |= IXGBE_SDP1_GPIEN;
4086 gpie |= IXGBE_SDP2_GPIEN;
Don Skidmore2698b202011-04-13 07:01:52 +00004087 }
Alexander Duycka34bcff2010-08-19 13:39:20 +00004088
4089 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4090}
4091
Alexander Duyckc7ccde02011-07-21 00:40:40 +00004092static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
Alexander Duycka34bcff2010-08-19 13:39:20 +00004093{
4094 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duycka34bcff2010-08-19 13:39:20 +00004095 int err;
Alexander Duycka34bcff2010-08-19 13:39:20 +00004096 u32 ctrl_ext;
4097
4098 ixgbe_get_hw_control(adapter);
4099 ixgbe_setup_gpie(adapter);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004100
Auke Kok9a799d72007-09-15 14:07:45 -07004101 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
4102 ixgbe_configure_msix(adapter);
4103 else
4104 ixgbe_configure_msi_and_legacy(adapter);
4105
Emil Tantilovec74a472012-09-20 03:33:56 +00004106 /* enable the optics for 82599 SFP+ fiber */
4107 if (hw->mac.ops.enable_tx_laser)
Peter Waskiewicz61fac742010-04-27 00:38:15 +00004108 hw->mac.ops.enable_tx_laser(hw);
4109
Auke Kok9a799d72007-09-15 14:07:45 -07004110 clear_bit(__IXGBE_DOWN, &adapter->state);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004111 ixgbe_napi_enable_all(adapter);
4112
Alexander Duyck73c4b7c2010-11-16 19:26:57 -08004113 if (ixgbe_is_sfp(hw)) {
4114 ixgbe_sfp_link_config(adapter);
4115 } else {
4116 err = ixgbe_non_sfp_link_config(hw);
4117 if (err)
4118 e_err(probe, "link_config FAILED %d\n", err);
4119 }
4120
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004121 /* clear any pending interrupts, may auto mask */
4122 IXGBE_READ_REG(hw, IXGBE_EICR);
Emil Tantilov6af3b9e2010-09-29 21:35:23 +00004123 ixgbe_irq_enable(adapter, true, true);
Auke Kok9a799d72007-09-15 14:07:45 -07004124
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004125 /*
Don Skidmorebf069c92009-05-07 10:39:54 +00004126 * If this adapter has a fan, check to see if we had a failure
4127 * before we enabled the interrupt.
4128 */
4129 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
4130 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4131 if (esdp & IXGBE_ESDP_SDP1)
Emil Tantilov396e7992010-07-01 20:05:12 +00004132 e_crit(drv, "Fan has stopped, replace the adapter\n");
Don Skidmorebf069c92009-05-07 10:39:54 +00004133 }
4134
Peter P Waskiewicz Jr1da100b2009-01-19 16:55:03 -08004135 /* enable transmits */
Alexander Duyck477de6e2010-08-19 13:38:11 +00004136 netif_tx_start_all_queues(adapter->netdev);
Peter P Waskiewicz Jr1da100b2009-01-19 16:55:03 -08004137
Auke Kok9a799d72007-09-15 14:07:45 -07004138	/* bring the link up in the watchdog; this could race with our first
4139 * link up interrupt but shouldn't be a problem */
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07004140 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
4141 adapter->link_check_timeout = jiffies;
Alexander Duyck70864002011-04-27 09:13:56 +00004142 mod_timer(&adapter->service_timer, jiffies);
Greg Rosec9205692010-01-22 22:46:22 +00004143
4144 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
4145 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4146 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
4147 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
Auke Kok9a799d72007-09-15 14:07:45 -07004148}
4149
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08004150void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
4151{
4152 WARN_ON(in_interrupt());
Alexander Duyck70864002011-04-27 09:13:56 +00004153 /* put off any impending NetWatchDogTimeout */
4154 adapter->netdev->trans_start = jiffies;
4155
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08004156 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
Don Skidmore032b4322011-03-18 09:32:53 +00004157 usleep_range(1000, 2000);
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08004158 ixgbe_down(adapter);
Greg Rose5809a1a2010-03-24 09:36:08 +00004159 /*
4160 * If SR-IOV enabled then wait a bit before bringing the adapter
4161 * back up to give the VFs time to respond to the reset. The
4162 * two second wait is based upon the watchdog timer cycle in
4163 * the VF driver.
4164 */
4165 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
4166 msleep(2000);
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08004167 ixgbe_up(adapter);
4168 clear_bit(__IXGBE_RESETTING, &adapter->state);
4169}
4170
Alexander Duyckc7ccde02011-07-21 00:40:40 +00004171void ixgbe_up(struct ixgbe_adapter *adapter)
Auke Kok9a799d72007-09-15 14:07:45 -07004172{
4173 /* hardware has been reset, we need to reload some things */
4174 ixgbe_configure(adapter);
4175
Alexander Duyckc7ccde02011-07-21 00:40:40 +00004176 ixgbe_up_complete(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07004177}
4178
4179void ixgbe_reset(struct ixgbe_adapter *adapter)
4180{
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07004181 struct ixgbe_hw *hw = &adapter->hw;
Don Skidmore8ca783a2009-05-26 20:40:47 -07004182 int err;
4183
Alexander Duyck70864002011-04-27 09:13:56 +00004184 /* lock SFP init bit to prevent race conditions with the watchdog */
4185 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
4186 usleep_range(1000, 2000);
4187
4188 /* clear all SFP and link config related flags while holding SFP_INIT */
4189 adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
4190 IXGBE_FLAG2_SFP_NEEDS_RESET);
4191 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
4192
Don Skidmore8ca783a2009-05-26 20:40:47 -07004193 err = hw->mac.ops.init_hw(hw);
Peter P Waskiewicz Jrda4dd0f2009-06-04 11:10:35 +00004194 switch (err) {
4195 case 0:
4196 case IXGBE_ERR_SFP_NOT_PRESENT:
Alexander Duyck70864002011-04-27 09:13:56 +00004197 case IXGBE_ERR_SFP_NOT_SUPPORTED:
Peter P Waskiewicz Jrda4dd0f2009-06-04 11:10:35 +00004198 break;
4199 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
Emil Tantilov849c4542010-06-03 16:53:41 +00004200 e_dev_err("master disable timed out\n");
Peter P Waskiewicz Jrda4dd0f2009-06-04 11:10:35 +00004201 break;
Peter P Waskiewicz Jr794caeb2009-06-04 16:02:24 +00004202 case IXGBE_ERR_EEPROM_VERSION:
4203 /* We are running on a pre-production device, log a warning */
Emil Tantilov849c4542010-06-03 16:53:41 +00004204 e_dev_warn("This device is a pre-production adapter/LOM. "
Stephen Hemminger52f33af2011-12-22 16:34:52 +00004205 "Please be aware there may be issues associated with "
Emil Tantilov849c4542010-06-03 16:53:41 +00004206 "your hardware. If you are experiencing problems "
4207 "please contact your Intel or hardware "
4208 "representative who provided you with this "
4209 "hardware.\n");
Peter P Waskiewicz Jr794caeb2009-06-04 16:02:24 +00004210 break;
Peter P Waskiewicz Jrda4dd0f2009-06-04 11:10:35 +00004211 default:
Emil Tantilov849c4542010-06-03 16:53:41 +00004212 e_dev_err("Hardware Error: %d\n", err);
Peter P Waskiewicz Jrda4dd0f2009-06-04 11:10:35 +00004213 }
Auke Kok9a799d72007-09-15 14:07:45 -07004214
Alexander Duyck70864002011-04-27 09:13:56 +00004215 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
4216
Auke Kok9a799d72007-09-15 14:07:45 -07004217 /* reprogram the RAR[0] in case user changed it. */
Alexander Duyck1d9c0bf2012-05-05 05:32:21 +00004218 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
Alexander Duyck7fa7c9d2012-05-05 05:32:52 +00004219
4220 /* update SAN MAC vmdq pool selection */
4221 if (hw->mac.san_mac_rar_index)
4222 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
Jacob Keller1a71ab22012-08-25 03:54:19 +00004223
Jacob Keller1a71ab22012-08-25 03:54:19 +00004224 if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
4225 ixgbe_ptp_reset(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07004226}
4227
Auke Kok9a799d72007-09-15 14:07:45 -07004228/**
4229 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
Auke Kok9a799d72007-09-15 14:07:45 -07004230 * @rx_ring: ring to free buffers from
4231 **/
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004232static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07004233{
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004234 struct device *dev = rx_ring->dev;
Auke Kok9a799d72007-09-15 14:07:45 -07004235 unsigned long size;
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004236 u16 i;
Auke Kok9a799d72007-09-15 14:07:45 -07004237
Alexander Duyck84418e32010-08-19 13:40:54 +00004238 /* ring already cleared, nothing to do */
4239 if (!rx_ring->rx_buffer_info)
4240 return;
Auke Kok9a799d72007-09-15 14:07:45 -07004241
Alexander Duyck84418e32010-08-19 13:40:54 +00004242 /* Free all the Rx ring sk_buffs */
Auke Kok9a799d72007-09-15 14:07:45 -07004243 for (i = 0; i < rx_ring->count; i++) {
Alexander Duyckf8003262012-03-03 02:35:52 +00004244 struct ixgbe_rx_buffer *rx_buffer;
Auke Kok9a799d72007-09-15 14:07:45 -07004245
Alexander Duyckf8003262012-03-03 02:35:52 +00004246 rx_buffer = &rx_ring->rx_buffer_info[i];
4247 if (rx_buffer->skb) {
4248 struct sk_buff *skb = rx_buffer->skb;
4249 if (IXGBE_CB(skb)->page_released) {
4250 dma_unmap_page(dev,
4251 IXGBE_CB(skb)->dma,
4252 ixgbe_rx_bufsz(rx_ring),
4253 DMA_FROM_DEVICE);
4254 IXGBE_CB(skb)->page_released = false;
Alexander Duyck4c1975d2012-01-31 02:59:23 +00004255 }
4256 dev_kfree_skb(skb);
Auke Kok9a799d72007-09-15 14:07:45 -07004257 }
Alexander Duyckf8003262012-03-03 02:35:52 +00004258 rx_buffer->skb = NULL;
4259 if (rx_buffer->dma)
4260 dma_unmap_page(dev, rx_buffer->dma,
4261 ixgbe_rx_pg_size(rx_ring),
4262 DMA_FROM_DEVICE);
4263 rx_buffer->dma = 0;
4264 if (rx_buffer->page)
Alexander Duyckdd411ec2012-04-06 04:24:50 +00004265 __free_pages(rx_buffer->page,
4266 ixgbe_rx_pg_order(rx_ring));
Alexander Duyckf8003262012-03-03 02:35:52 +00004267 rx_buffer->page = NULL;
Auke Kok9a799d72007-09-15 14:07:45 -07004268 }
4269
4270 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
4271 memset(rx_ring->rx_buffer_info, 0, size);
4272
4273 /* Zero out the descriptor ring */
4274 memset(rx_ring->desc, 0, rx_ring->size);
4275
Alexander Duyckf8003262012-03-03 02:35:52 +00004276 rx_ring->next_to_alloc = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07004277 rx_ring->next_to_clean = 0;
4278 rx_ring->next_to_use = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07004279}
4280
4281/**
4282 * ixgbe_clean_tx_ring - Free Tx Buffers
Auke Kok9a799d72007-09-15 14:07:45 -07004283 * @tx_ring: ring to be cleaned
4284 **/
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004285static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07004286{
4287 struct ixgbe_tx_buffer *tx_buffer_info;
4288 unsigned long size;
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004289 u16 i;
Auke Kok9a799d72007-09-15 14:07:45 -07004290
Alexander Duyck84418e32010-08-19 13:40:54 +00004291 /* ring already cleared, nothing to do */
4292 if (!tx_ring->tx_buffer_info)
4293 return;
Auke Kok9a799d72007-09-15 14:07:45 -07004294
Alexander Duyck84418e32010-08-19 13:40:54 +00004295 /* Free all the Tx ring sk_buffs */
Auke Kok9a799d72007-09-15 14:07:45 -07004296 for (i = 0; i < tx_ring->count; i++) {
4297 tx_buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004298 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
Auke Kok9a799d72007-09-15 14:07:45 -07004299 }
4300
John Fastabenddad8a3b2012-04-23 12:22:39 +00004301 netdev_tx_reset_queue(txring_txq(tx_ring));
4302
Auke Kok9a799d72007-09-15 14:07:45 -07004303 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
4304 memset(tx_ring->tx_buffer_info, 0, size);
4305
4306 /* Zero out the descriptor ring */
4307 memset(tx_ring->desc, 0, tx_ring->size);
4308
4309 tx_ring->next_to_use = 0;
4310 tx_ring->next_to_clean = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07004311}
4312
4313/**
Auke Kok9a799d72007-09-15 14:07:45 -07004314 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
4315 * @adapter: board private structure
4316 **/
4317static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
4318{
4319 int i;
4320
4321 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004322 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
Auke Kok9a799d72007-09-15 14:07:45 -07004323}
4324
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004325/**
4326 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
4327 * @adapter: board private structure
4328 **/
4329static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
4330{
4331 int i;
4332
4333 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004334 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004335}
4336
Alexander Duycke4911d52011-05-11 07:18:52 +00004337static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
4338{
4339 struct hlist_node *node, *node2;
4340 struct ixgbe_fdir_filter *filter;
4341
4342 spin_lock(&adapter->fdir_perfect_lock);
4343
4344 hlist_for_each_entry_safe(filter, node, node2,
4345 &adapter->fdir_filter_list, fdir_node) {
4346 hlist_del(&filter->fdir_node);
4347 kfree(filter);
4348 }
4349 adapter->fdir_filter_count = 0;
4350
4351 spin_unlock(&adapter->fdir_perfect_lock);
4352}
4353
Auke Kok9a799d72007-09-15 14:07:45 -07004354void ixgbe_down(struct ixgbe_adapter *adapter)
4355{
4356 struct net_device *netdev = adapter->netdev;
Jesse Brandeburg7f821872008-09-11 20:00:16 -07004357 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07004358 u32 rxctrl;
Alexander Duyckbf29ee62010-11-16 19:27:07 -08004359 int i;
Auke Kok9a799d72007-09-15 14:07:45 -07004360
4361 /* signal that we are down to the interrupt handler */
4362 set_bit(__IXGBE_DOWN, &adapter->state);
4363
4364 /* disable receives */
Jesse Brandeburg7f821872008-09-11 20:00:16 -07004365 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4366 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
Auke Kok9a799d72007-09-15 14:07:45 -07004367
Yi Zou2d39d572011-01-06 14:29:56 +00004368 /* disable all enabled rx queues */
4369 for (i = 0; i < adapter->num_rx_queues; i++)
4370 /* this call also flushes the previous write */
4371 ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
4372
Don Skidmore032b4322011-03-18 09:32:53 +00004373 usleep_range(10000, 20000);
Auke Kok9a799d72007-09-15 14:07:45 -07004374
Jesse Brandeburg7f821872008-09-11 20:00:16 -07004375 netif_tx_stop_all_queues(netdev);
4376
Alexander Duyck70864002011-04-27 09:13:56 +00004377 /* call carrier off first to avoid false dev_watchdog timeouts */
John Fastabendc0dfb902010-04-27 02:13:39 +00004378 netif_carrier_off(netdev);
4379 netif_tx_disable(netdev);
4380
4381 ixgbe_irq_disable(adapter);
4382
4383 ixgbe_napi_disable_all(adapter);
4384
Alexander Duyckd034acf2011-04-27 09:25:34 +00004385 adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
4386 IXGBE_FLAG2_RESET_REQUESTED);
Alexander Duyck70864002011-04-27 09:13:56 +00004387 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
4388
4389 del_timer_sync(&adapter->service_timer);
4390
Don Skidmore0a1f87c2009-09-18 09:45:43 +00004391 if (adapter->num_vfs) {
Alexander Duyck8e34d1a2011-07-15 07:29:49 +00004392 /* Clear EITR Select mapping */
4393 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
4394
4395 /* Mark all the VFs as inactive */
4396 for (i = 0 ; i < adapter->num_vfs; i++)
Rusty Russell3db1cd52011-12-19 13:56:45 +00004397 adapter->vfinfo[i].clear_to_send = false;
Alexander Duyck8e34d1a2011-07-15 07:29:49 +00004398
Don Skidmore0a1f87c2009-09-18 09:45:43 +00004399 /* ping all the active vfs to let them know we are going down */
Auke Kok9a799d72007-09-15 14:07:45 -07004400 ixgbe_ping_all_vfs(adapter);
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07004401
Auke Kok9a799d72007-09-15 14:07:45 -07004402 /* Disable all VFTE/VFRE TX/RX */
Peter Waskiewiczb25ebfd2010-10-05 01:27:49 +00004403 ixgbe_disable_tx_rx(adapter);
Peter Waskiewiczb25ebfd2010-10-05 01:27:49 +00004404 }
4405
Jesse Brandeburg7f821872008-09-11 20:00:16 -07004406 /* disable transmits in the hardware now that interrupts are off */
4407 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyckbf29ee62010-11-16 19:27:07 -08004408 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
Alexander Duyck34cecbb2011-04-22 04:08:14 +00004409 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
Jesse Brandeburg7f821872008-09-11 20:00:16 -07004410 }
Alexander Duyck34cecbb2011-04-22 04:08:14 +00004411
4412 /* Disable the Tx DMA engine on 82599 and X540 */
Alexander Duyckbd508172010-11-16 19:27:03 -08004413 switch (hw->mac.type) {
4414 case ixgbe_mac_82599EB:
Don Skidmoreb93a2222010-11-16 19:27:17 -08004415 case ixgbe_mac_X540:
PJ Waskiewicz88512532009-03-13 22:15:10 +00004416 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
Joe Perchese8e9f692010-09-07 21:34:53 +00004417 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
4418 ~IXGBE_DMATXCTL_TE));
Alexander Duyckbd508172010-11-16 19:27:03 -08004419 break;
4420 default:
4421 break;
4422 }
Jesse Brandeburg7f821872008-09-11 20:00:16 -07004423
Paul Larson6f4a0e42008-06-24 17:00:56 -07004424 if (!pci_channel_offline(adapter->pdev))
4425 ixgbe_reset(adapter);
Don Skidmorec6ecf392010-12-03 03:31:51 +00004426
Emil Tantilovec74a472012-09-20 03:33:56 +00004427 /* power down the optics for 82599 SFP+ fiber */
4428 if (hw->mac.ops.disable_tx_laser)
Don Skidmorec6ecf392010-12-03 03:31:51 +00004429 hw->mac.ops.disable_tx_laser(hw);
4430
Auke Kok9a799d72007-09-15 14:07:45 -07004431 ixgbe_clean_all_tx_rings(adapter);
4432 ixgbe_clean_all_rx_rings(adapter);
4433
Jeff Garzik5dd2d332008-10-16 05:09:31 -04004434#ifdef CONFIG_IXGBE_DCA
Jesse Brandeburg96b0e0f2008-08-26 04:27:21 -07004435 /* since we reset the hardware DCA settings were cleared */
Alexander Duycke35ec122009-05-21 13:07:12 +00004436 ixgbe_setup_dca(adapter);
Jesse Brandeburg96b0e0f2008-08-26 04:27:21 -07004437#endif
Auke Kok9a799d72007-09-15 14:07:45 -07004438}
4439
Auke Kok9a799d72007-09-15 14:07:45 -07004440/**
Auke Kok9a799d72007-09-15 14:07:45 -07004441 * ixgbe_tx_timeout - Respond to a Tx Hang
4442 * @netdev: network interface device structure
4443 **/
4444static void ixgbe_tx_timeout(struct net_device *netdev)
4445{
4446 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4447
4448 /* Do the reset outside of interrupt context */
Alexander Duyckc83c6cb2011-04-27 09:21:16 +00004449 ixgbe_tx_timeout_reset(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07004450}
4451
Jesse Brandeburg4df10462009-03-13 22:15:31 +00004452/**
Auke Kok9a799d72007-09-15 14:07:45 -07004453 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
4454 * @adapter: board private structure to initialize
4455 *
4456 * ixgbe_sw_init initializes the Adapter private data structure.
4457 * Fields are initialized based on PCI device information and
4458 * OS network device settings (MTU size).
4459 **/
4460static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4461{
4462 struct ixgbe_hw *hw = &adapter->hw;
4463 struct pci_dev *pdev = adapter->pdev;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004464 unsigned int rss;
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08004465#ifdef CONFIG_IXGBE_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -08004466 int j;
4467 struct tc_configuration *tc;
4468#endif
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004469
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07004470 /* PCI config space info */
4471
4472 hw->vendor_id = pdev->vendor;
4473 hw->device_id = pdev->device;
4474 hw->revision_id = pdev->revision;
4475 hw->subsystem_vendor_id = pdev->subsystem_vendor;
4476 hw->subsystem_device_id = pdev->subsystem_device;
4477
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004478 /* Set capability flags */
Jesse Brandeburg3ed69d72012-02-10 10:20:02 +00004479 rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
Alexander Duyckc0876632012-05-10 00:01:46 +00004480 adapter->ring_feature[RING_F_RSS].limit = rss;
Alexander Duyckbd508172010-11-16 19:27:03 -08004481 switch (hw->mac.type) {
4482 case ixgbe_mac_82598EB:
Don Skidmorebf069c92009-05-07 10:39:54 +00004483 if (hw->device_id == IXGBE_DEV_ID_82598AT)
4484 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
Alexander Duyck49c7ffb2012-05-05 05:30:43 +00004485 adapter->max_q_vectors = MAX_Q_VECTORS_82598;
Alexander Duyckbd508172010-11-16 19:27:03 -08004486 break;
Don Skidmoreb93a2222010-11-16 19:27:17 -08004487 case ixgbe_mac_X540:
Jacob Keller4f51bf72011-08-20 04:49:45 +00004488 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
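		/* fall through */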
4489 case ixgbe_mac_82599EB:
Alexander Duyck49c7ffb2012-05-05 05:30:43 +00004490 adapter->max_q_vectors = MAX_Q_VECTORS_82599;
Peter P Waskiewicz Jr0c19d6a2009-07-30 12:25:28 +00004491 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
4492 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07004493 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
4494 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
Alexander Duyck45b9f502011-01-06 14:29:59 +00004495 /* Flow Director hash filters enabled */
Alexander Duyck45b9f502011-01-06 14:29:59 +00004496 adapter->atr_sample_rate = 20;
Alexander Duyckc0876632012-05-10 00:01:46 +00004497 adapter->ring_feature[RING_F_FDIR].limit =
Joe Perchese8e9f692010-09-07 21:34:53 +00004498 IXGBE_MAX_FDIR_INDICES;
Alexander Duyckc04f6ca2011-05-11 07:18:36 +00004499 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
Yi Zoueacd73f2009-05-13 13:11:06 +00004500#ifdef IXGBE_FCOE
Yi Zou0d551582009-07-22 14:07:12 +00004501 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
4502 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
Yi Zou61a0f422009-12-03 11:32:22 +00004503#ifdef CONFIG_IXGBE_DCB
Yi Zou6ee16522009-08-31 12:34:28 +00004504 /* Default traffic class to use for FCoE */
John Fastabend56075a92010-07-26 20:41:31 +00004505 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
Yi Zou61a0f422009-12-03 11:32:22 +00004506#endif
Yi Zoueacd73f2009-05-13 13:11:06 +00004507#endif /* IXGBE_FCOE */
Alexander Duyckbd508172010-11-16 19:27:03 -08004508 break;
4509 default:
4510 break;
Alexander Duyckf8212f92009-04-27 22:42:37 +00004511 }
Alexander Duyck2f90b862008-11-20 20:52:10 -08004512
Alexander Duyck7c8ae652012-05-05 05:32:47 +00004513#ifdef IXGBE_FCOE
4514 /* FCoE support exists, always init the FCoE lock */
4515 spin_lock_init(&adapter->fcoe.lock);
4516
4517#endif
Alexander Duyck1fc5f032011-06-02 04:28:39 +00004518 /* n-tuple support exists, always init our spinlock */
4519 spin_lock_init(&adapter->fdir_perfect_lock);
4520
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08004521#ifdef CONFIG_IXGBE_DCB
John Fastabend4de2a022011-09-27 03:52:01 +00004522 switch (hw->mac.type) {
4523 case ixgbe_mac_X540:
4524 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
4525 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
4526 break;
4527 default:
4528 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
4529 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
4530 break;
4531 }
4532
Alexander Duyck2f90b862008-11-20 20:52:10 -08004533 /* Configure DCB traffic classes */
4534 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
4535 tc = &adapter->dcb_cfg.tc_config[j];
4536 tc->path[DCB_TX_CONFIG].bwg_id = 0;
4537 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
4538 tc->path[DCB_RX_CONFIG].bwg_id = 0;
4539 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
4540 tc->dcb_pfc = pfc_disabled;
4541 }
John Fastabend4de2a022011-09-27 03:52:01 +00004542
4543 /* Initialize default user to priority mapping, UPx->TC0 */
4544 tc = &adapter->dcb_cfg.tc_config[0];
4545 tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
4546 tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
4547
Alexander Duyck2f90b862008-11-20 20:52:10 -08004548 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
4549 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
Peter P Waskiewicz Jr264857b2009-05-17 12:35:16 +00004550 adapter->dcb_cfg.pfc_mode_enable = false;
Alexander Duyck2f90b862008-11-20 20:52:10 -08004551 adapter->dcb_set_bitmap = 0x00;
John Fastabend30323092011-03-01 05:25:35 +00004552 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
John Fastabendf525c6d22012-04-18 22:42:27 +00004553 memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
4554 sizeof(adapter->temp_dcb_cfg));
Alexander Duyck2f90b862008-11-20 20:52:10 -08004555
4556#endif
Auke Kok9a799d72007-09-15 14:07:45 -07004557
4558 /* default flow control settings */
Don Skidmorecd7664f2009-03-31 21:33:44 +00004559 hw->fc.requested_mode = ixgbe_fc_full;
Don Skidmore71fd5702009-03-31 21:35:05 +00004560 hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */
John Fastabend9da712d2011-08-23 03:14:22 +00004561 ixgbe_pbthresh_setup(adapter);
Jesse Brandeburg2b9ade92008-08-26 04:27:10 -07004562 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
4563 hw->fc.send_xon = true;
Don Skidmore71fd5702009-03-31 21:35:05 +00004564 hw->fc.disable_fc_autoneg = false;
Auke Kok9a799d72007-09-15 14:07:45 -07004565
Alexander Duyck99d74482012-05-09 08:09:25 +00004566#ifdef CONFIG_PCI_IOV
4567 /* assign number of SR-IOV VFs */
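	/* (the hardware exposes at most 63 VFs since pool 0 is reserved
	 * for the PF; an out-of-range max_vfs module parameter disables
	 * SR-IOV entirely)
	 */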
4568 if (hw->mac.type != ixgbe_mac_82598EB)
4569 adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs;
4570
4571#endif
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07004572 /* enable itr by default in dynamic mode */
Nelson, Shannonf7554a22009-09-18 09:46:06 +00004573 adapter->rx_itr_setting = 1;
Nelson, Shannonf7554a22009-09-18 09:46:06 +00004574 adapter->tx_itr_setting = 1;
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07004575
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07004576 /* set default ring sizes */
4577 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
4578 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
4579
Alexander Duyckbd198052011-06-11 01:45:08 +00004580 /* set default work limits */
Alexander Duyck59224552011-08-31 00:01:06 +00004581 adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
Alexander Duyckbd198052011-06-11 01:45:08 +00004582
Auke Kok9a799d72007-09-15 14:07:45 -07004583 /* initialize eeprom parameters */
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07004584 if (ixgbe_init_eeprom_params_generic(hw)) {
Emil Tantilov849c4542010-06-03 16:53:41 +00004585 e_dev_err("EEPROM initialization failed\n");
Auke Kok9a799d72007-09-15 14:07:45 -07004586 return -EIO;
4587 }
4588
Auke Kok9a799d72007-09-15 14:07:45 -07004589 set_bit(__IXGBE_DOWN, &adapter->state);
4590
4591 return 0;
4592}
4593
4594/**
4595 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
Jesse Brandeburg3a581072008-08-26 04:27:08 -07004596 * @tx_ring: tx descriptor ring (for a specific queue) to setup
Auke Kok9a799d72007-09-15 14:07:45 -07004597 *
4598 * Return 0 on success, negative on failure
4599 **/
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004600int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07004601{
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004602 struct device *dev = tx_ring->dev;
Alexander Duyckde88eee2012-02-08 07:49:59 +00004603 int orig_node = dev_to_node(dev);
4604 int numa_node = -1;
Auke Kok9a799d72007-09-15 14:07:45 -07004605 int size;
4606
Jesse Brandeburg3a581072008-08-26 04:27:08 -07004607 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
Alexander Duyckde88eee2012-02-08 07:49:59 +00004608
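	/* Prefer allocations on the q_vector's NUMA node so ring
	 * bookkeeping stays local to the CPU servicing the queue; fall
	 * back to any-node allocations if the node-local attempt fails.
	 */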
4609 if (tx_ring->q_vector)
4610 numa_node = tx_ring->q_vector->numa_node;
4611
4612 tx_ring->tx_buffer_info = vzalloc_node(size, numa_node);
Jesse Brandeburg1a6c14a2010-02-03 14:18:50 +00004613 if (!tx_ring->tx_buffer_info)
Eric Dumazet89bf67f2010-11-22 00:15:06 +00004614 tx_ring->tx_buffer_info = vzalloc(size);
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07004615 if (!tx_ring->tx_buffer_info)
4616 goto err;
Auke Kok9a799d72007-09-15 14:07:45 -07004617
4618 /* round up to nearest 4K */
Peter P Waskiewicz Jr12207e42009-02-06 21:47:24 -08004619 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
Jesse Brandeburg3a581072008-08-26 04:27:08 -07004620 tx_ring->size = ALIGN(tx_ring->size, 4096);
Auke Kok9a799d72007-09-15 14:07:45 -07004621
Alexander Duyckde88eee2012-02-08 07:49:59 +00004622 set_dev_node(dev, numa_node);
4623 tx_ring->desc = dma_alloc_coherent(dev,
4624 tx_ring->size,
4625 &tx_ring->dma,
4626 GFP_KERNEL);
4627 set_dev_node(dev, orig_node);
4628 if (!tx_ring->desc)
4629 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
4630 &tx_ring->dma, GFP_KERNEL);
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07004631 if (!tx_ring->desc)
4632 goto err;
Auke Kok9a799d72007-09-15 14:07:45 -07004633
Jesse Brandeburg3a581072008-08-26 04:27:08 -07004634 tx_ring->next_to_use = 0;
4635 tx_ring->next_to_clean = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07004636 return 0;
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07004637
4638err:
4639 vfree(tx_ring->tx_buffer_info);
4640 tx_ring->tx_buffer_info = NULL;
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004641 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07004642 return -ENOMEM;
Auke Kok9a799d72007-09-15 14:07:45 -07004643}
4644
4645/**
Alexander Duyck69888672008-09-11 20:05:39 -07004646 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
4647 * @adapter: board private structure
4648 *
4649 * If this function returns with an error, then it's possible one or
4650 * more of the rings is populated (while the rest are not). It is the
4651 * caller's duty to clean those orphaned rings.
4652 *
4653 * Return 0 on success, negative on failure
4654 **/
4655static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
4656{
4657 int i, err = 0;
4658
4659 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004660 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
Alexander Duyck69888672008-09-11 20:05:39 -07004661 if (!err)
4662 continue;
Alexander Duyckde3d5b92012-05-18 06:33:47 +00004663
Emil Tantilov396e7992010-07-01 20:05:12 +00004664 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
Alexander Duyckde3d5b92012-05-18 06:33:47 +00004665 goto err_setup_tx;
Alexander Duyck69888672008-09-11 20:05:39 -07004666 }
4667
Alexander Duyckde3d5b92012-05-18 06:33:47 +00004668 return 0;
4669err_setup_tx:
4670 /* rewind the index freeing the rings as we go */
4671 while (i--)
4672 ixgbe_free_tx_resources(adapter->tx_ring[i]);
Alexander Duyck69888672008-09-11 20:05:39 -07004673 return err;
4674}
4675
4676/**
Auke Kok9a799d72007-09-15 14:07:45 -07004677 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
Jesse Brandeburg3a581072008-08-26 04:27:08 -07004678 * @rx_ring: rx descriptor ring (for a specific queue) to setup
Auke Kok9a799d72007-09-15 14:07:45 -07004679 *
4680 * Returns 0 on success, negative on failure
4681 **/
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004682int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07004683{
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004684 struct device *dev = rx_ring->dev;
Alexander Duyckde88eee2012-02-08 07:49:59 +00004685 int orig_node = dev_to_node(dev);
4686 int numa_node = -1;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004687 int size;
Auke Kok9a799d72007-09-15 14:07:45 -07004688
Jesse Brandeburg3a581072008-08-26 04:27:08 -07004689 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
Alexander Duyckde88eee2012-02-08 07:49:59 +00004690
4691 if (rx_ring->q_vector)
4692 numa_node = rx_ring->q_vector->numa_node;
4693
4694 rx_ring->rx_buffer_info = vzalloc_node(size, numa_node);
Jesse Brandeburg1a6c14a2010-02-03 14:18:50 +00004695 if (!rx_ring->rx_buffer_info)
Eric Dumazet89bf67f2010-11-22 00:15:06 +00004696 rx_ring->rx_buffer_info = vzalloc(size);
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004697 if (!rx_ring->rx_buffer_info)
4698 goto err;
Auke Kok9a799d72007-09-15 14:07:45 -07004699
Auke Kok9a799d72007-09-15 14:07:45 -07004700 /* Round up to nearest 4K */
Jesse Brandeburg3a581072008-08-26 04:27:08 -07004701 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
4702 rx_ring->size = ALIGN(rx_ring->size, 4096);
Auke Kok9a799d72007-09-15 14:07:45 -07004703
Alexander Duyckde88eee2012-02-08 07:49:59 +00004704 set_dev_node(dev, numa_node);
4705 rx_ring->desc = dma_alloc_coherent(dev,
4706 rx_ring->size,
4707 &rx_ring->dma,
4708 GFP_KERNEL);
4709 set_dev_node(dev, orig_node);
4710 if (!rx_ring->desc)
4711 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
4712 &rx_ring->dma, GFP_KERNEL);
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004713 if (!rx_ring->desc)
4714 goto err;
Auke Kok9a799d72007-09-15 14:07:45 -07004715
Jesse Brandeburg3a581072008-08-26 04:27:08 -07004716 rx_ring->next_to_clean = 0;
4717 rx_ring->next_to_use = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07004718
4719 return 0;
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004720err:
4721 vfree(rx_ring->rx_buffer_info);
4722 rx_ring->rx_buffer_info = NULL;
4723 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07004724 return -ENOMEM;
Auke Kok9a799d72007-09-15 14:07:45 -07004725}
4726
4727/**
Alexander Duyck69888672008-09-11 20:05:39 -07004728 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
4729 * @adapter: board private structure
4730 *
4731 * If this function returns with an error, then it's possible one or
4732 * more of the rings is populated (while the rest are not). It is the
4733 * caller's duty to clean those orphaned rings.
4734 *
4735 * Return 0 on success, negative on failure
4736 **/
Alexander Duyck69888672008-09-11 20:05:39 -07004737static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
4738{
4739 int i, err = 0;
4740
4741 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004742 err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
Alexander Duyck69888672008-09-11 20:05:39 -07004743 if (!err)
4744 continue;
Alexander Duyckde3d5b92012-05-18 06:33:47 +00004745
Emil Tantilov396e7992010-07-01 20:05:12 +00004746 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
Alexander Duyckde3d5b92012-05-18 06:33:47 +00004747 goto err_setup_rx;
Alexander Duyck69888672008-09-11 20:05:39 -07004748 }
4749
Alexander Duyck7c8ae652012-05-05 05:32:47 +00004750#ifdef IXGBE_FCOE
4751 err = ixgbe_setup_fcoe_ddp_resources(adapter);
4752 if (!err)
4753#endif
4754 return 0;
Alexander Duyckde3d5b92012-05-18 06:33:47 +00004755err_setup_rx:
4756 /* rewind the index freeing the rings as we go */
4757 while (i--)
4758 ixgbe_free_rx_resources(adapter->rx_ring[i]);
Alexander Duyck69888672008-09-11 20:05:39 -07004759 return err;
4760}
4761
4762/**
Auke Kok9a799d72007-09-15 14:07:45 -07004763 * ixgbe_free_tx_resources - Free Tx Resources per Queue
Auke Kok9a799d72007-09-15 14:07:45 -07004764 * @tx_ring: Tx descriptor ring for a specific queue
4765 *
4766 * Free all transmit software resources
4767 **/
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004768void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07004769{
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004770 ixgbe_clean_tx_ring(tx_ring);
Auke Kok9a799d72007-09-15 14:07:45 -07004771
4772 vfree(tx_ring->tx_buffer_info);
4773 tx_ring->tx_buffer_info = NULL;
4774
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004775 /* if not set, then don't free */
4776 if (!tx_ring->desc)
4777 return;
4778
4779 dma_free_coherent(tx_ring->dev, tx_ring->size,
4780 tx_ring->desc, tx_ring->dma);
Auke Kok9a799d72007-09-15 14:07:45 -07004781
4782 tx_ring->desc = NULL;
4783}
4784
4785/**
4786 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
4787 * @adapter: board private structure
4788 *
4789 * Free all transmit software resources
4790 **/
4791static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
4792{
4793 int i;
4794
4795 for (i = 0; i < adapter->num_tx_queues; i++)
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004796 if (adapter->tx_ring[i]->desc)
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004797 ixgbe_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9a799d72007-09-15 14:07:45 -07004798}
4799
4800/**
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004801 * ixgbe_free_rx_resources - Free Rx Resources
Auke Kok9a799d72007-09-15 14:07:45 -07004802 * @rx_ring: ring to clean the resources from
4803 *
4804 * Free all receive software resources
4805 **/
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004806void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07004807{
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004808 ixgbe_clean_rx_ring(rx_ring);
Auke Kok9a799d72007-09-15 14:07:45 -07004809
4810 vfree(rx_ring->rx_buffer_info);
4811 rx_ring->rx_buffer_info = NULL;
4812
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004813 /* if not set, then don't free */
4814 if (!rx_ring->desc)
4815 return;
4816
4817 dma_free_coherent(rx_ring->dev, rx_ring->size,
4818 rx_ring->desc, rx_ring->dma);
Auke Kok9a799d72007-09-15 14:07:45 -07004819
4820 rx_ring->desc = NULL;
4821}
4822
4823/**
4824 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
4825 * @adapter: board private structure
4826 *
4827 * Free all receive software resources
4828 **/
4829static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
4830{
4831 int i;
4832
Alexander Duyck7c8ae652012-05-05 05:32:47 +00004833#ifdef IXGBE_FCOE
4834 ixgbe_free_fcoe_ddp_resources(adapter);
4835
4836#endif
Auke Kok9a799d72007-09-15 14:07:45 -07004837 for (i = 0; i < adapter->num_rx_queues; i++)
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004838 if (adapter->rx_ring[i]->desc)
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004839 ixgbe_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9a799d72007-09-15 14:07:45 -07004840}
4841
4842/**
Auke Kok9a799d72007-09-15 14:07:45 -07004843 * ixgbe_change_mtu - Change the Maximum Transfer Unit
4844 * @netdev: network interface device structure
4845 * @new_mtu: new value for maximum frame size
4846 *
4847 * Returns 0 on success, negative on failure
4848 **/
4849static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
4850{
4851 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4852 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
4853
Jesse Brandeburg42c783c2008-09-11 19:56:28 -07004854 /* MTU < 68 is an error and causes problems on some kernels */
Alexander Duyck655309e2012-02-08 07:50:35 +00004855 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
4856 return -EINVAL;
4857
4858 /*
Alexander Duyck872844d2012-08-15 02:10:43 +00004859 * For 82599EB we cannot allow legacy VFs to enable their receive
4860 * paths when MTU greater than 1500 is configured. So display a
4861 * warning that legacy VFs will be disabled.
Alexander Duyck655309e2012-02-08 07:50:35 +00004862 */
4863 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
4864 (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
4865 (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
Alexander Duyck872844d2012-08-15 02:10:43 +00004866 e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
Auke Kok9a799d72007-09-15 14:07:45 -07004867
Emil Tantilov396e7992010-07-01 20:05:12 +00004868 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
Alexander Duyck655309e2012-02-08 07:50:35 +00004869
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004870 /* must set new MTU before calling down or up */
Auke Kok9a799d72007-09-15 14:07:45 -07004871 netdev->mtu = new_mtu;
4872
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08004873 if (netif_running(netdev))
4874 ixgbe_reinit_locked(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07004875
4876 return 0;
4877}
4878
4879/**
4880 * ixgbe_open - Called when a network interface is made active
4881 * @netdev: network interface device structure
4882 *
4883 * Returns 0 on success, negative value on failure
4884 *
4885 * The open entry point is called when a network interface is made
4886 * active by the system (IFF_UP). At this point all resources needed
4887 * for transmit and receive operations are allocated, the interrupt
4888 * handler is registered with the OS, the watchdog timer is started,
4889 * and the stack is notified that the interface is ready.
4890 **/
4891static int ixgbe_open(struct net_device *netdev)
4892{
4893 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4894 int err;
Auke Kok9a799d72007-09-15 14:07:45 -07004895
Auke Kok4bebfaa2008-02-11 09:26:01 -08004896 /* disallow open during test */
4897 if (test_bit(__IXGBE_TESTING, &adapter->state))
4898 return -EBUSY;
4899
Jesse Brandeburg54386462009-04-17 20:44:27 +00004900 netif_carrier_off(netdev);
4901
Auke Kok9a799d72007-09-15 14:07:45 -07004902 /* allocate transmit descriptors */
4903 err = ixgbe_setup_all_tx_resources(adapter);
4904 if (err)
4905 goto err_setup_tx;
4906
Auke Kok9a799d72007-09-15 14:07:45 -07004907 /* allocate receive descriptors */
4908 err = ixgbe_setup_all_rx_resources(adapter);
4909 if (err)
4910 goto err_setup_rx;
4911
4912 ixgbe_configure(adapter);
4913
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004914 err = ixgbe_request_irq(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07004915 if (err)
4916 goto err_req_irq;
4917
Alexander Duyckac802f52012-07-12 05:52:53 +00004918 /* Notify the stack of the actual queue counts. */
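	/* when extra rx pools (e.g. SR-IOV/VMDq) are active the PF is
	 * limited to a single queue pair */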
4919 err = netif_set_real_num_tx_queues(netdev,
4920 adapter->num_rx_pools > 1 ? 1 :
4921 adapter->num_tx_queues);
4922 if (err)
4923 goto err_set_queues;
4924
4926 err = netif_set_real_num_rx_queues(netdev,
4927 adapter->num_rx_pools > 1 ? 1 :
4928 adapter->num_rx_queues);
4929 if (err)
4930 goto err_set_queues;
4931
Jacob Keller1a71ab22012-08-25 03:54:19 +00004932 ixgbe_ptp_init(adapter);
Jacob Keller1a71ab22012-08-25 03:54:19 +00004933
Alexander Duyckc7ccde02011-07-21 00:40:40 +00004934 ixgbe_up_complete(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07004935
4936 return 0;
4937
Alexander Duyckac802f52012-07-12 05:52:53 +00004938err_set_queues:
4939 ixgbe_free_irq(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07004940err_req_irq:
Mallikarjuna R Chilakalaa20a1192009-03-31 21:34:44 +00004941 ixgbe_free_all_rx_resources(adapter);
Alexander Duyckde3d5b92012-05-18 06:33:47 +00004942err_setup_rx:
Mallikarjuna R Chilakalaa20a1192009-03-31 21:34:44 +00004943 ixgbe_free_all_tx_resources(adapter);
Alexander Duyckde3d5b92012-05-18 06:33:47 +00004944err_setup_tx:
Auke Kok9a799d72007-09-15 14:07:45 -07004945 ixgbe_reset(adapter);
4946
4947 return err;
4948}
4949
4950/**
4951 * ixgbe_close - Disables a network interface
4952 * @netdev: network interface device structure
4953 *
4954 * Returns 0, this is not allowed to fail
4955 *
4956 * The close entry point is called when an interface is deactivated
4957 * by the OS. The hardware is still under the driver's control, but
4958 * needs to be disabled. A global MAC reset is issued to stop the
4959 * hardware, and all transmit and receive resources are freed.
4960 **/
4961static int ixgbe_close(struct net_device *netdev)
4962{
4963 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07004964
Jacob Keller1a71ab22012-08-25 03:54:19 +00004965 ixgbe_ptp_stop(adapter);
Jacob Keller1a71ab22012-08-25 03:54:19 +00004966
Auke Kok9a799d72007-09-15 14:07:45 -07004967 ixgbe_down(adapter);
4968 ixgbe_free_irq(adapter);
4969
Alexander Duycke4911d52011-05-11 07:18:52 +00004970 ixgbe_fdir_filter_exit(adapter);
4971
Auke Kok9a799d72007-09-15 14:07:45 -07004972 ixgbe_free_all_tx_resources(adapter);
4973 ixgbe_free_all_rx_resources(adapter);
4974
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08004975 ixgbe_release_hw_control(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07004976
4977 return 0;
4978}
4979
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07004980#ifdef CONFIG_PM
4981static int ixgbe_resume(struct pci_dev *pdev)
4982{
Alexander Duyckc60fbb02010-11-16 19:26:54 -08004983 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
4984 struct net_device *netdev = adapter->netdev;
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07004985 u32 err;
4986
4987 pci_set_power_state(pdev, PCI_D0);
4988 pci_restore_state(pdev);
Don Skidmore656ab812009-12-23 21:19:19 -08004989 /*
4990 * pci_restore_state clears dev->state_saved so call
4991 * pci_save_state to restore it.
4992 */
4993 pci_save_state(pdev);
gouji-new9ce77662009-05-06 10:44:45 +00004994
4995 err = pci_enable_device_mem(pdev);
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07004996 if (err) {
Emil Tantilov849c4542010-06-03 16:53:41 +00004997 e_dev_err("Cannot enable PCI device from suspend\n");
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07004998 return err;
4999 }
5000 pci_set_master(pdev);
5001
Don Skidmoredd4d8ca2009-04-29 00:22:31 -07005002 pci_wake_from_d3(pdev, false);
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005003
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005004 ixgbe_reset(adapter);
5005
Waskiewicz Jr, Peter P495dce12009-04-23 11:15:18 +00005006 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
5007
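	/* hold rtnl while re-initializing the interrupt scheme and
	 * reopening the device, as ixgbe_open expects rtnl to be held */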
Alexander Duyckac802f52012-07-12 05:52:53 +00005008 rtnl_lock();
5009 err = ixgbe_init_interrupt_scheme(adapter);
5010 if (!err && netif_running(netdev))
Alexander Duyckc60fbb02010-11-16 19:26:54 -08005011 err = ixgbe_open(netdev);
Alexander Duyckac802f52012-07-12 05:52:53 +00005012
5013 rtnl_unlock();
5014
5015 if (err)
5016 return err;
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005017
5018 netif_device_attach(netdev);
5019
5020 return 0;
5021}
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005022#endif /* CONFIG_PM */
Rafael J. Wysocki9d8d05a2009-04-15 17:44:01 +00005023
5024static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005025{
Alexander Duyckc60fbb02010-11-16 19:26:54 -08005026 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
5027 struct net_device *netdev = adapter->netdev;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005028 struct ixgbe_hw *hw = &adapter->hw;
5029 u32 ctrl, fctrl;
5030 u32 wufc = adapter->wol;
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005031#ifdef CONFIG_PM
5032 int retval = 0;
5033#endif
5034
5035 netif_device_detach(netdev);
5036
5037 if (netif_running(netdev)) {
Don Skidmoreab6039a2012-03-17 05:51:52 +00005038 rtnl_lock();
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005039 ixgbe_down(adapter);
5040 ixgbe_free_irq(adapter);
5041 ixgbe_free_all_tx_resources(adapter);
5042 ixgbe_free_all_rx_resources(adapter);
Don Skidmoreab6039a2012-03-17 05:51:52 +00005043 rtnl_unlock();
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005044 }
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005045
Alexander Duyck5f5ae6f2010-11-16 19:26:52 -08005046 ixgbe_clear_interrupt_scheme(adapter);
5047
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005048#ifdef CONFIG_PM
5049 retval = pci_save_state(pdev);
5050 if (retval)
5051 return retval;
Jesse Brandeburg4df10462009-03-13 22:15:31 +00005052
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005053#endif
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005054 if (wufc) {
5055 ixgbe_set_rx_mode(netdev);
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005056
Emil Tantilovec74a472012-09-20 03:33:56 +00005057		/* enable the optics for 82599 SFP+ fiber so WoL can function */
5058 if (hw->mac.ops.enable_tx_laser)
Don Skidmorec509e752012-04-05 08:12:05 +00005059 hw->mac.ops.enable_tx_laser(hw);
5060
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005061 /* turn on all-multi mode if wake on multicast is enabled */
5062 if (wufc & IXGBE_WUFC_MC) {
5063 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5064 fctrl |= IXGBE_FCTRL_MPE;
5065 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
5066 }
5067
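		/* block new PCIe master requests (GIO) while wake is armed */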
5068 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
5069 ctrl |= IXGBE_CTRL_GIO_DIS;
5070 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
5071
5072 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
5073 } else {
5074 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
5075 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
5076 }
5077
Alexander Duyckbd508172010-11-16 19:27:03 -08005078 switch (hw->mac.type) {
5079 case ixgbe_mac_82598EB:
Don Skidmoredd4d8ca2009-04-29 00:22:31 -07005080 pci_wake_from_d3(pdev, false);
Alexander Duyckbd508172010-11-16 19:27:03 -08005081 break;
5082 case ixgbe_mac_82599EB:
Don Skidmoreb93a2222010-11-16 19:27:17 -08005083 case ixgbe_mac_X540:
Alexander Duyckbd508172010-11-16 19:27:03 -08005084 pci_wake_from_d3(pdev, !!wufc);
5085 break;
5086 default:
5087 break;
5088 }
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005089
Rafael J. Wysocki9d8d05a2009-04-15 17:44:01 +00005090 *enable_wake = !!wufc;
5091
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005092 ixgbe_release_hw_control(adapter);
5093
5094 pci_disable_device(pdev);
5095
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005096 return 0;
5097}
5098
Rafael J. Wysocki9d8d05a2009-04-15 17:44:01 +00005099#ifdef CONFIG_PM
5100static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
5101{
5102 int retval;
5103 bool wake;
5104
5105 retval = __ixgbe_shutdown(pdev, &wake);
5106 if (retval)
5107 return retval;
5108
5109 if (wake) {
5110 pci_prepare_to_sleep(pdev);
5111 } else {
5112 pci_wake_from_d3(pdev, false);
5113 pci_set_power_state(pdev, PCI_D3hot);
5114 }
5115
5116 return 0;
5117}
5118#endif /* CONFIG_PM */
5119
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005120static void ixgbe_shutdown(struct pci_dev *pdev)
5121{
Rafael J. Wysocki9d8d05a2009-04-15 17:44:01 +00005122 bool wake;
5123
5124 __ixgbe_shutdown(pdev, &wake);
5125
5126 if (system_state == SYSTEM_POWER_OFF) {
5127 pci_wake_from_d3(pdev, wake);
5128 pci_set_power_state(pdev, PCI_D3hot);
5129 }
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005130}
5131
5132/**
Auke Kok9a799d72007-09-15 14:07:45 -07005133 * ixgbe_update_stats - Update the board statistics counters.
5134 * @adapter: board private structure
5135 **/
5136void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5137{
Ajit Khaparde2d86f132009-10-07 02:43:49 +00005138 struct net_device *netdev = adapter->netdev;
Auke Kok9a799d72007-09-15 14:07:45 -07005139 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck5b7da512010-11-16 19:26:50 -08005140 struct ixgbe_hw_stats *hwstats = &adapter->stats;
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08005141 u64 total_mpc = 0;
5142 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
Alexander Duyck5b7da512010-11-16 19:26:50 -08005143 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
5144 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
Alexander Duyck8a0da212012-01-31 02:59:49 +00005145 u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07005146
Don Skidmored08935c2010-06-11 13:20:29 +00005147 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5148 test_bit(__IXGBE_RESETTING, &adapter->state))
5149 return;
5150
Mallikarjuna R Chilakala94b982b2009-11-23 06:32:06 +00005151 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
Alexander Duyckf8212f92009-04-27 22:42:37 +00005152 u64 rsc_count = 0;
Mallikarjuna R Chilakala94b982b2009-11-23 06:32:06 +00005153 u64 rsc_flush = 0;
Mallikarjuna R Chilakala94b982b2009-11-23 06:32:06 +00005154 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck5b7da512010-11-16 19:26:50 -08005155 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
5156 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
Mallikarjuna R Chilakala94b982b2009-11-23 06:32:06 +00005157 }
5158 adapter->rsc_total_count = rsc_count;
5159 adapter->rsc_total_flush = rsc_flush;
PJ Waskiewiczd51019a2009-03-13 22:12:48 +00005160 }
5161
Alexander Duyck5b7da512010-11-16 19:26:50 -08005162 for (i = 0; i < adapter->num_rx_queues; i++) {
5163 struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
5164 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
5165 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
5166 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
Alexander Duyck8a0da212012-01-31 02:59:49 +00005167 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
Alexander Duyck5b7da512010-11-16 19:26:50 -08005168 bytes += rx_ring->stats.bytes;
5169 packets += rx_ring->stats.packets;
5170 }
Mallikarjuna R Chilakalaeb985f02009-12-15 11:56:59 +00005171 adapter->non_eop_descs = non_eop_descs;
Alexander Duyck5b7da512010-11-16 19:26:50 -08005172 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
5173 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
Alexander Duyck8a0da212012-01-31 02:59:49 +00005174 adapter->hw_csum_rx_error = hw_csum_rx_error;
Alexander Duyck5b7da512010-11-16 19:26:50 -08005175 netdev->stats.rx_bytes = bytes;
5176 netdev->stats.rx_packets = packets;
5177
5178 bytes = 0;
5179 packets = 0;
5180 /* gather some stats to the adapter struct that are per queue */
5181 for (i = 0; i < adapter->num_tx_queues; i++) {
5182 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
5183 restart_queue += tx_ring->tx_stats.restart_queue;
5184 tx_busy += tx_ring->tx_stats.tx_busy;
5185 bytes += tx_ring->stats.bytes;
5186 packets += tx_ring->stats.packets;
5187 }
5188 adapter->restart_queue = restart_queue;
5189 adapter->tx_busy = tx_busy;
5190 netdev->stats.tx_bytes = bytes;
5191 netdev->stats.tx_packets = packets;
Jesse Brandeburg7ca3bc52009-12-03 11:33:29 +00005192
Joe Perches7ca647b2010-09-07 21:35:40 +00005193 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
Emil Tantilov1a70db4b2011-07-26 07:51:41 +00005194
5195 /* 8 register reads */
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08005196 for (i = 0; i < 8; i++) {
5197		/* for unused packet buffers the register should read 0 */
5198 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
5199 missed_rx += mpc;
Joe Perches7ca647b2010-09-07 21:35:40 +00005200 hwstats->mpc[i] += mpc;
5201 total_mpc += hwstats->mpc[i];
Emil Tantilov1a70db4b2011-07-26 07:51:41 +00005202 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
5203 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
Alexander Duyckbd508172010-11-16 19:27:03 -08005204 switch (hw->mac.type) {
5205 case ixgbe_mac_82598EB:
Emil Tantilov1a70db4b2011-07-26 07:51:41 +00005206 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
5207 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
5208 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
Joe Perches7ca647b2010-09-07 21:35:40 +00005209 hwstats->pxonrxc[i] +=
5210 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
Alexander Duyckbd508172010-11-16 19:27:03 -08005211 break;
5212 case ixgbe_mac_82599EB:
Don Skidmoreb93a2222010-11-16 19:27:17 -08005213 case ixgbe_mac_X540:
Alexander Duyckbd508172010-11-16 19:27:03 -08005214 hwstats->pxonrxc[i] +=
5215 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
Alexander Duyckbd508172010-11-16 19:27:03 -08005216 break;
5217 default:
5218 break;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005219 }
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08005220 }
Emil Tantilov1a70db4b2011-07-26 07:51:41 +00005221
5222	/* 16 register reads */
5223 for (i = 0; i < 16; i++) {
5224 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
5225 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
5226 if ((hw->mac.type == ixgbe_mac_82599EB) ||
5227 (hw->mac.type == ixgbe_mac_X540)) {
5228 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
5229 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
5230 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
5231 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */
5232 }
5233 }
5234
Joe Perches7ca647b2010-09-07 21:35:40 +00005235 hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08005236 /* work around hardware counting issue */
Joe Perches7ca647b2010-09-07 21:35:40 +00005237 hwstats->gprc -= missed_rx;
Auke Kok9a799d72007-09-15 14:07:45 -07005238
John Fastabendc84d3242010-11-16 19:27:12 -08005239 ixgbe_update_xoff_received(adapter);
5240
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08005241 /* 82598 hardware only has a 32 bit counter in the high register */
Alexander Duyckbd508172010-11-16 19:27:03 -08005242 switch (hw->mac.type) {
5243 case ixgbe_mac_82598EB:
5244 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
Alexander Duyckbd508172010-11-16 19:27:03 -08005245 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
5246 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
5247 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
5248 break;
Don Skidmoreb93a2222010-11-16 19:27:17 -08005249 case ixgbe_mac_X540:
Emil Tantilov58f6bcf2011-04-21 08:43:43 +00005250		/* OS2BMC stats are X540 only */
5251 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
5252 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
5253 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
5254 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
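		/* fall through - remaining counters are shared with 82599EB */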
5255 case ixgbe_mac_82599EB:
Alexander Duycka4d4f622012-03-28 08:03:32 +00005256 for (i = 0; i < 16; i++)
5257 adapter->hw_rx_no_dma_resources +=
5258 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
Joe Perches7ca647b2010-09-07 21:35:40 +00005259 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
Alexander Duyckbd508172010-11-16 19:27:03 -08005260 IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
Joe Perches7ca647b2010-09-07 21:35:40 +00005261 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
Alexander Duyckbd508172010-11-16 19:27:03 -08005262 IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
Joe Perches7ca647b2010-09-07 21:35:40 +00005263 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
Alexander Duyckbd508172010-11-16 19:27:03 -08005264 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
Joe Perches7ca647b2010-09-07 21:35:40 +00005265 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
Joe Perches7ca647b2010-09-07 21:35:40 +00005266 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
5267 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
Yi Zou6d455222009-05-13 13:12:16 +00005268#ifdef IXGBE_FCOE
Joe Perches7ca647b2010-09-07 21:35:40 +00005269 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
5270 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
5271 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
5272 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
5273 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
5274 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
Amir Hanania7b859eb2011-08-31 02:07:55 +00005275		/* Add up per-CPU counters for total DDP alloc failures */
Alexander Duyck5a1ee272012-05-05 17:14:28 +00005276 if (adapter->fcoe.ddp_pool) {
5277 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
5278 struct ixgbe_fcoe_ddp_pool *ddp_pool;
5279 unsigned int cpu;
5280 u64 noddp = 0, noddp_ext_buff = 0;
Amir Hanania7b859eb2011-08-31 02:07:55 +00005281 for_each_possible_cpu(cpu) {
Alexander Duyck5a1ee272012-05-05 17:14:28 +00005282 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
5283 noddp += ddp_pool->noddp;
5284 noddp_ext_buff += ddp_pool->noddp_ext_buff;
Amir Hanania7b859eb2011-08-31 02:07:55 +00005285 }
Alexander Duyck5a1ee272012-05-05 17:14:28 +00005286 hwstats->fcoe_noddp = noddp;
5287 hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
Amir Hanania7b859eb2011-08-31 02:07:55 +00005288 }
Yi Zou6d455222009-05-13 13:12:16 +00005289#endif /* IXGBE_FCOE */
Alexander Duyckbd508172010-11-16 19:27:03 -08005290 break;
5291 default:
5292 break;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005293 }
Auke Kok9a799d72007-09-15 14:07:45 -07005294 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
Joe Perches7ca647b2010-09-07 21:35:40 +00005295 hwstats->bprc += bprc;
5296 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005297 if (hw->mac.type == ixgbe_mac_82598EB)
Joe Perches7ca647b2010-09-07 21:35:40 +00005298 hwstats->mprc -= bprc;
5299 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
5300 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
5301 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
5302 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
5303 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
5304 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
5305 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
5306 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08005307 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
Joe Perches7ca647b2010-09-07 21:35:40 +00005308 hwstats->lxontxc += lxon;
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08005309 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
Joe Perches7ca647b2010-09-07 21:35:40 +00005310 hwstats->lxofftxc += lxoff;
Joe Perches7ca647b2010-09-07 21:35:40 +00005311 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
5312 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08005313 /*
5314 * 82598 errata - tx of flow control packets is included in tx counters
5315 */
5316 xon_off_tot = lxon + lxoff;
Joe Perches7ca647b2010-09-07 21:35:40 +00005317 hwstats->gptc -= xon_off_tot;
5318 hwstats->mptc -= xon_off_tot;
5319 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
5320 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
5321 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
5322 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
5323 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
5324 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
5325 hwstats->ptc64 -= xon_off_tot;
5326 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
5327 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
5328 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
5329 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
5330 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
5331 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
Auke Kok9a799d72007-09-15 14:07:45 -07005332
5333 /* Fill out the OS statistics structure */
Joe Perches7ca647b2010-09-07 21:35:40 +00005334 netdev->stats.multicast = hwstats->mprc;
Auke Kok9a799d72007-09-15 14:07:45 -07005335
5336 /* Rx Errors */
Joe Perches7ca647b2010-09-07 21:35:40 +00005337 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
Ajit Khaparde2d86f132009-10-07 02:43:49 +00005338 netdev->stats.rx_dropped = 0;
Joe Perches7ca647b2010-09-07 21:35:40 +00005339 netdev->stats.rx_length_errors = hwstats->rlec;
5340 netdev->stats.rx_crc_errors = hwstats->crcerrs;
Ajit Khaparde2d86f132009-10-07 02:43:49 +00005341 netdev->stats.rx_missed_errors = total_mpc;
Auke Kok9a799d72007-09-15 14:07:45 -07005342}
5343
5344/**
Alexander Duyckd034acf2011-04-27 09:25:34 +00005345 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
Ben Hutchings49ce9c22012-07-10 10:56:00 +00005346 * @adapter: pointer to the device adapter structure
Auke Kok9a799d72007-09-15 14:07:45 -07005347 **/
Alexander Duyckd034acf2011-04-27 09:25:34 +00005348static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
Auke Kok9a799d72007-09-15 14:07:45 -07005349{
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00005350 struct ixgbe_hw *hw = &adapter->hw;
5351 int i;
5352
Alexander Duyckd034acf2011-04-27 09:25:34 +00005353 if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
5354 return;
5355
5356 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
5357
5358 /* if interface is down do nothing */
5359 if (test_bit(__IXGBE_DOWN, &adapter->state))
5360 return;
5361
5362 /* do nothing if we are not using signature filters */
5363 if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
5364 return;
5365
5366 adapter->fdir_overflow++;
5367
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00005368 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
5369 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck7d637bc2010-11-16 19:26:56 -08005370 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
Alexander Duyckf0f97782011-04-22 04:08:09 +00005371 &(adapter->tx_ring[i]->state));
Alexander Duyckd034acf2011-04-27 09:25:34 +00005372 /* re-enable flow director interrupts */
5373 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00005374 } else {
Emil Tantilov396e7992010-07-01 20:05:12 +00005375 e_err(probe, "failed to finish FDIR re-initialization, "
Emil Tantilov849c4542010-06-03 16:53:41 +00005376 "ignored adding FDIR ATR filters\n");
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00005377 }
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00005378}
5379
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005380/**
5381 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
Ben Hutchings49ce9c22012-07-10 10:56:00 +00005382 * @adapter: pointer to the device adapter structure
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005383 *
5384 * This function serves two purposes. First it strobes the interrupt lines
Stephen Hemminger52f33af2011-12-22 16:34:52 +00005385 * in order to make certain that interrupts are occurring. Second, it sets the
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005386 * bits needed to check for TX hangs. As a result we should immediately
Stephen Hemminger52f33af2011-12-22 16:34:52 +00005387 * determine if a hang has occurred.
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005388 */
5389static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
5390{
Auke Kok9a799d72007-09-15 14:07:45 -07005391 struct ixgbe_hw *hw = &adapter->hw;
5392 u64 eics = 0;
5393 int i;
5394
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005395 /* If we're down or resetting, just bail */
5396 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5397 test_bit(__IXGBE_RESETTING, &adapter->state))
5398 return;
Alexander Duyckfe49f042009-06-04 16:00:09 +00005399
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005400 /* Force detection of hung controller */
5401 if (netif_carrier_ok(adapter->netdev)) {
5402 for (i = 0; i < adapter->num_tx_queues; i++)
5403 set_check_for_tx_hang(adapter->tx_ring[i]);
5404 }
Alexander Duyckfe49f042009-06-04 16:00:09 +00005405
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00005406 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
Alexander Duyckfe49f042009-06-04 16:00:09 +00005407 /*
5408 * for legacy and MSI interrupts don't set any bits
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00005409 * that are enabled for EIAM, because this operation
Alexander Duyckfe49f042009-06-04 16:00:09 +00005410 * would set *both* EIMS and EICS for any bit in EIAM
5411 */
5412 IXGBE_WRITE_REG(hw, IXGBE_EICS,
5413 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005414 } else {
5415 /* get one bit for every active tx/rx interrupt vector */
Alexander Duyck49c7ffb2012-05-05 05:30:43 +00005416 for (i = 0; i < adapter->num_q_vectors; i++) {
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005417 struct ixgbe_q_vector *qv = adapter->q_vector[i];
Alexander Duyckefe3d3c2011-07-15 03:05:21 +00005418 if (qv->rx.ring || qv->tx.ring)
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005419 eics |= ((u64)1 << i);
5420 }
Alexander Duyckfe49f042009-06-04 16:00:09 +00005421 }
5422
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005423 /* Cause software interrupt to ensure rings are cleaned */
Alexander Duyckfe49f042009-06-04 16:00:09 +00005424 ixgbe_irq_rearm_queues(adapter, eics);
5425
Alexander Duyckfe49f042009-06-04 16:00:09 +00005426}
5427
5428/**
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005429 * ixgbe_watchdog_update_link - update the link status
Ben Hutchings49ce9c22012-07-10 10:56:00 +00005430 * @adapter: pointer to the device adapter structure
5431 * @link_speed: pointer to a u32 to store the link_speed
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07005432 **/
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005433static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005434{
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005435 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005436 u32 link_speed = adapter->link_speed;
5437 bool link_up = adapter->link_up;
Alexander Duyck041441d2012-04-19 17:48:48 +00005438 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00005439
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005440 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
5441 return;
5442
5443 if (hw->mac.ops.check_link) {
5444 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00005445 } else {
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005446 /* always assume link is up, if no check link function */
5447 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
5448 link_up = true;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00005449 }
Alexander Duyck041441d2012-04-19 17:48:48 +00005450
5451 if (adapter->ixgbe_ieee_pfc)
5452 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
5453
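	/* 802.3x link-level flow control and PFC are mutually exclusive */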
Alexander Duyck3ebe8fd2012-04-25 04:36:38 +00005454 if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
Alexander Duyck041441d2012-04-19 17:48:48 +00005455 hw->mac.ops.fc_enable(hw);
Alexander Duyck3ebe8fd2012-04-25 04:36:38 +00005456 ixgbe_set_rx_drop_en(adapter);
5457 }
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005458
5459 if (link_up ||
5460 time_after(jiffies, (adapter->link_check_timeout +
5461 IXGBE_TRY_LINK_TIMEOUT))) {
5462 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
5463 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
5464 IXGBE_WRITE_FLUSH(hw);
5465 }
5466
5467 adapter->link_up = link_up;
5468 adapter->link_speed = link_speed;
5469}
5470
Alexander Duyck107d3012012-10-02 00:17:03 +00005471static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
5472{
5473#ifdef CONFIG_IXGBE_DCB
5474 struct net_device *netdev = adapter->netdev;
5475 struct dcb_app app = {
5476 .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
5477 .protocol = 0,
5478 };
5479 u8 up = 0;
5480
5481 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
5482 up = dcb_ieee_getapp_mask(netdev, &app);
5483
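	/* default to the lowest user priority advertised for the app, else 0 */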
5484 adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
5485#endif
5486}
5487
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005488/**
5489 * ixgbe_watchdog_link_is_up - update netif_carrier status and
5490 * print link up message
Ben Hutchings49ce9c22012-07-10 10:56:00 +00005491 * @adapter: pointer to the device adapter structure
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005492 **/
5493static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
5494{
5495 struct net_device *netdev = adapter->netdev;
5496 struct ixgbe_hw *hw = &adapter->hw;
5497 u32 link_speed = adapter->link_speed;
5498 bool flow_rx, flow_tx;
5499
5500 /* only continue if link was previously down */
5501 if (netif_carrier_ok(netdev))
5502 return;
5503
5504 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
5505
5506 switch (hw->mac.type) {
5507 case ixgbe_mac_82598EB: {
5508 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5509 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
5510 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
5511 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
5512 }
5513 break;
5514 case ixgbe_mac_X540:
5515 case ixgbe_mac_82599EB: {
5516 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
5517 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
5518 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
5519 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
5520 }
5521 break;
5522 default:
5523 flow_tx = false;
5524 flow_rx = false;
5525 break;
5526 }
Jacob Keller3a6a4ed2012-05-01 05:24:58 +00005527
Jacob Keller1a71ab22012-08-25 03:54:19 +00005528 if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
5529 ixgbe_ptp_start_cyclecounter(adapter);
Jacob Keller3a6a4ed2012-05-01 05:24:58 +00005530
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005531 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
5532 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
5533 "10 Gbps" :
5534 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
5535 "1 Gbps" :
5536 (link_speed == IXGBE_LINK_SPEED_100_FULL ?
5537 "100 Mbps" :
5538 "unknown speed"))),
5539 ((flow_rx && flow_tx) ? "RX/TX" :
5540 (flow_rx ? "RX" :
5541 (flow_tx ? "TX" : "None"))));
5542
5543 netif_carrier_on(netdev);
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005544 ixgbe_check_vf_rate_limit(adapter);
Alexander Duyckbefa2af2012-05-05 05:30:38 +00005545
Alexander Duyck107d3012012-10-02 00:17:03 +00005546 /* update the default user priority for VFs */
5547 ixgbe_update_default_up(adapter);
5548
Alexander Duyckbefa2af2012-05-05 05:30:38 +00005549 /* ping all the active vfs to let them know link has changed */
5550 ixgbe_ping_all_vfs(adapter);
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005551}
5552
5553/**
5554 * ixgbe_watchdog_link_is_down - update netif_carrier status and
5555 * print link down message
Ben Hutchings49ce9c22012-07-10 10:56:00 +00005556 * @adapter: pointer to the adapter structure
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005557 **/
Alexander Duyck581330b2012-02-08 07:51:47 +00005558static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005559{
5560 struct net_device *netdev = adapter->netdev;
5561 struct ixgbe_hw *hw = &adapter->hw;
5562
5563 adapter->link_up = false;
5564 adapter->link_speed = 0;
5565
5566 /* only continue if link was up previously */
5567 if (!netif_carrier_ok(netdev))
5568 return;
5569
5570 /* poll for SFP+ cable when link is down */
5571 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
5572 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
5573
Jacob Keller1a71ab22012-08-25 03:54:19 +00005574 if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
5575 ixgbe_ptp_start_cyclecounter(adapter);
Jacob Keller3a6a4ed2012-05-01 05:24:58 +00005576
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005577 e_info(drv, "NIC Link is Down\n");
5578 netif_carrier_off(netdev);
Alexander Duyckbefa2af2012-05-05 05:30:38 +00005579
5580 /* ping all the active vfs to let them know link has changed */
5581 ixgbe_ping_all_vfs(adapter);
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005582}
5583
5584/**
5585 * ixgbe_watchdog_flush_tx - flush queues on link down
Ben Hutchings49ce9c22012-07-10 10:56:00 +00005586 * @adapter: pointer to the device adapter structure
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005587 **/
5588static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
5589{
5590 int i;
5591 int some_tx_pending = 0;
5592
5593 if (!netif_carrier_ok(adapter->netdev)) {
5594 for (i = 0; i < adapter->num_tx_queues; i++) {
5595 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
5596 if (tx_ring->next_to_use != tx_ring->next_to_clean) {
5597 some_tx_pending = 1;
5598 break;
5599 }
5600 }
5601
5602 if (some_tx_pending) {
5603 /* We've lost link, so the controller stops DMA,
5604 * but we've got queued Tx work that's never going
5605			 * to get done, so reset the controller to flush Tx.
5606 * (Do the reset outside of interrupt context).
5607 */
Alexander Duyckc83c6cb2011-04-27 09:21:16 +00005608 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005609 }
5610 }
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00005611}
5612
Greg Rosea985b6c32010-11-18 03:02:52 +00005613static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
5614{
5615 u32 ssvpc;
5616
Greg Rose0584d992012-08-08 00:00:58 +00005617 /* Do not perform spoof check for 82598 or if not in IOV mode */
5618 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
5619 adapter->num_vfs == 0)
Greg Rosea985b6c32010-11-18 03:02:52 +00005620 return;
5621
5622 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
5623
5624 /*
5625	 * the SSVPC register is cleared on read; if it reads zero, no
5626	 * spoofed packets were seen in the last interval.
5627 */
5628 if (!ssvpc)
5629 return;
5630
Emil Tantilovd6ea0752012-08-08 06:28:37 +00005631 e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
Greg Rosea985b6c32010-11-18 03:02:52 +00005632}
5633
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00005634/**
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005635 * ixgbe_watchdog_subtask - check and bring link up
Ben Hutchings49ce9c22012-07-10 10:56:00 +00005636 * @adapter: pointer to the device adapter structure
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07005637 **/
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005638static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07005639{
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005640 /* if interface is down do nothing */
Emil Tantilov7edebf92011-08-27 07:18:37 +00005641 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5642 test_bit(__IXGBE_RESETTING, &adapter->state))
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005643 return;
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07005644
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005645 ixgbe_watchdog_update_link(adapter);
John Fastabend10eec952010-02-03 14:23:32 +00005646
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005647 if (adapter->link_up)
5648 ixgbe_watchdog_link_is_up(adapter);
5649 else
5650 ixgbe_watchdog_link_is_down(adapter);
Nelson, Shannonbc59fcd2009-04-27 22:43:12 +00005651
Greg Rosea985b6c32010-11-18 03:02:52 +00005652 ixgbe_spoof_check(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07005653 ixgbe_update_stats(adapter);
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005654
5655 ixgbe_watchdog_flush_tx(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07005656}
5657
Alexander Duyck70864002011-04-27 09:13:56 +00005658/**
5659 * ixgbe_sfp_detection_subtask - poll for SFP+ cable
Ben Hutchings49ce9c22012-07-10 10:56:00 +00005660 * @adapter: the ixgbe adapter structure
Alexander Duyck70864002011-04-27 09:13:56 +00005661 **/
5662static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
5663{
5664 struct ixgbe_hw *hw = &adapter->hw;
5665 s32 err;
5666
5667 /* not searching for SFP so there is nothing to do here */
5668 if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
5669 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
5670 return;
5671
5672 /* someone else is in init, wait until next service event */
5673 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5674 return;
5675
5676 err = hw->phy.ops.identify_sfp(hw);
5677 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
5678 goto sfp_out;
5679
5680 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
5681 /* If no cable is present, then we need to reset
5682 * the next time we find a good cable. */
5683 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
5684 }
5685
5686 /* exit on error */
5687 if (err)
5688 goto sfp_out;
5689
5690 /* exit if reset not needed */
5691 if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
5692 goto sfp_out;
5693
5694 adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
5695
5696 /*
5697 * A module may be identified correctly, but the EEPROM may not have
5698 * support for that module. setup_sfp() will fail in that case, so
5699 * we should not allow that module to load.
5700 */
5701 if (hw->mac.type == ixgbe_mac_82598EB)
5702 err = hw->phy.ops.reset(hw);
5703 else
5704 err = hw->mac.ops.setup_sfp(hw);
5705
5706 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
5707 goto sfp_out;
5708
5709 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
5710 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
5711
5712sfp_out:
5713 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
5714
5715 if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
5716 (adapter->netdev->reg_state == NETREG_REGISTERED)) {
5717 e_dev_err("failed to initialize because an unsupported "
5718 "SFP+ module type was detected.\n");
5719 e_dev_err("Reload the driver after installing a "
5720 "supported module.\n");
5721 unregister_netdev(adapter->netdev);
5722 }
5723}
5724
5725/**
5726 * ixgbe_sfp_link_config_subtask - set up link SFP after module install
Ben Hutchings49ce9c22012-07-10 10:56:00 +00005727 * @adapter: the ixgbe adapter structure
Alexander Duyck70864002011-04-27 09:13:56 +00005728 **/
5729static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
5730{
5731 struct ixgbe_hw *hw = &adapter->hw;
5732 u32 autoneg;
5733 bool negotiation;
5734
5735 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
5736 return;
5737
5738 /* someone else is in init, wait until next service event */
5739 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5740 return;
5741
5742 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
5743
5744 autoneg = hw->phy.autoneg_advertised;
5745 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
5746 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
Alexander Duyck70864002011-04-27 09:13:56 +00005747 if (hw->mac.ops.setup_link)
5748 hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
5749
5750 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
5751 adapter->link_check_timeout = jiffies;
5752 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
5753}
5754
Greg Rose83c61fa2011-09-07 05:59:35 +00005755#ifdef CONFIG_PCI_IOV
5756static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
5757{
5758 int vf;
5759 struct ixgbe_hw *hw = &adapter->hw;
5760 struct net_device *netdev = adapter->netdev;
5761 u32 gpc;
5762 u32 ciaa, ciad;
5763
5764 gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
5765 if (gpc) /* If incrementing then no need for the check below */
5766 return;
5767 /*
5768 * Check to see if a bad DMA write target from an errant or
5769 * malicious VF has caused a PCIe error. If so then we can
5770 * issue a VFLR to the offending VF(s) and then resume without
5771 * requesting a full slot reset.
5772 */
5773
5774 for (vf = 0; vf < adapter->num_vfs; vf++) {
5775 ciaa = (vf << 16) | 0x80000000;
5776		/* 32 bit read so align; the status word we want is at offset 6 */
5777 ciaa |= PCI_COMMAND;
5778 IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
5779 ciad = IXGBE_READ_REG(hw, IXGBE_CIAD_82599);
5780 ciaa &= 0x7FFFFFFF;
5781 /* disable debug mode asap after reading data */
5782 IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
5783 /* Get the upper 16 bits which will be the PCI status reg */
5784 ciad >>= 16;
5785 if (ciad & PCI_STATUS_REC_MASTER_ABORT) {
5786 netdev_err(netdev, "VF %d Hung DMA\n", vf);
5787 /* Issue VFLR */
5788 ciaa = (vf << 16) | 0x80000000;
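			/* 0xA8 is assumed to be the VF's PCIe Device Control
			 * offset; the 0x8000 write below sets its FLR bit */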
5789 ciaa |= 0xA8;
5790 IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
5791 ciad = 0x00008000; /* VFLR */
5792 IXGBE_WRITE_REG(hw, IXGBE_CIAD_82599, ciad);
5793 ciaa &= 0x7FFFFFFF;
5794 IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
5795 }
5796 }
5797}
5798
5799#endif
Alexander Duyck70864002011-04-27 09:13:56 +00005800/**
5801 * ixgbe_service_timer - Timer Call-back
5802 * @data: pointer to adapter cast into an unsigned long
5803 **/
5804static void ixgbe_service_timer(unsigned long data)
5805{
5806 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
5807 unsigned long next_event_offset;
Greg Rose83c61fa2011-09-07 05:59:35 +00005808 bool ready = true;
Alexander Duyck70864002011-04-27 09:13:56 +00005809
5810 /* poll faster when waiting for link */
5811 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
5812 next_event_offset = HZ / 10;
5813 else
5814 next_event_offset = HZ * 2;
5815
Greg Rose83c61fa2011-09-07 05:59:35 +00005816#ifdef CONFIG_PCI_IOV
Alexander Duyck6bb78cf2012-02-08 07:51:22 +00005817 /*
5818 * don't bother with SR-IOV VF DMA hang check if there are
5819 * no VFs or the link is down
5820 */
5821 if (!adapter->num_vfs ||
5822 (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
5823 goto normal_timer_service;
5824
5825 /* If we have VFs allocated then we must check for DMA hangs */
5826 ixgbe_check_for_bad_vf(adapter);
5827 next_event_offset = HZ / 50;
5828 adapter->timer_event_accumulator++;
5829
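	/* polling runs at HZ / 50 (every 20 ms), but the full service
	 * task is only scheduled once per 100 polls (~2 seconds) */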
5830 if (adapter->timer_event_accumulator >= 100)
5831 adapter->timer_event_accumulator = 0;
5832 else
5833 ready = false;
5834
5835normal_timer_service:
Greg Rose83c61fa2011-09-07 05:59:35 +00005836#endif
Alexander Duyck70864002011-04-27 09:13:56 +00005837 /* Reset the timer */
5838 mod_timer(&adapter->service_timer, next_event_offset + jiffies);
5839
Greg Rose83c61fa2011-09-07 05:59:35 +00005840 if (ready)
5841 ixgbe_service_event_schedule(adapter);
Alexander Duyck70864002011-04-27 09:13:56 +00005842}
5843
Alexander Duyckc83c6cb2011-04-27 09:21:16 +00005844static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
5845{
5846 if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
5847 return;
5848
5849 adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;
5850
5851 /* If we're already down or resetting, just bail */
5852 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5853 test_bit(__IXGBE_RESETTING, &adapter->state))
5854 return;
5855
5856 ixgbe_dump(adapter);
5857 netdev_err(adapter->netdev, "Reset adapter\n");
5858 adapter->tx_timeout_count++;
5859
5860 ixgbe_reinit_locked(adapter);
5861}
5862
Alexander Duyck70864002011-04-27 09:13:56 +00005863/**
5864 * ixgbe_service_task - manages and runs subtasks
5865 * @work: pointer to work_struct containing our data
5866 **/
5867static void ixgbe_service_task(struct work_struct *work)
5868{
5869 struct ixgbe_adapter *adapter = container_of(work,
5870 struct ixgbe_adapter,
5871 service_task);
5872
Alexander Duyckc83c6cb2011-04-27 09:21:16 +00005873 ixgbe_reset_subtask(adapter);
Alexander Duyck70864002011-04-27 09:13:56 +00005874 ixgbe_sfp_detection_subtask(adapter);
5875 ixgbe_sfp_link_config_subtask(adapter);
Alexander Duyckf0f97782011-04-22 04:08:09 +00005876 ixgbe_check_overtemp_subtask(adapter);
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005877 ixgbe_watchdog_subtask(adapter);
Alexander Duyckd034acf2011-04-27 09:25:34 +00005878 ixgbe_fdir_reinit_subtask(adapter);
Alexander Duyck93c52dd2011-04-22 04:07:54 +00005879 ixgbe_check_hang_subtask(adapter);
Jacob Keller3a6a4ed2012-05-01 05:24:58 +00005880 ixgbe_ptp_overflow_check(adapter);
Alexander Duyck70864002011-04-27 09:13:56 +00005881
5882 ixgbe_service_event_complete(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07005883}
5884
Alexander Duyckfd0db0e2012-02-08 07:50:56 +00005885static int ixgbe_tso(struct ixgbe_ring *tx_ring,
5886 struct ixgbe_tx_buffer *first,
Alexander Duyck244e27a2012-02-08 07:51:11 +00005887 u8 *hdr_len)
Alexander Duyck897ab152011-05-27 05:31:47 +00005888{
Alexander Duyckfd0db0e2012-02-08 07:50:56 +00005889 struct sk_buff *skb = first->skb;
Alexander Duyck897ab152011-05-27 05:31:47 +00005890 u32 vlan_macip_lens, type_tucmd;
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07005891 u32 mss_l4len_idx, l4len;
Auke Kok9a799d72007-09-15 14:07:45 -07005892
Alexander Duyck897ab152011-05-27 05:31:47 +00005893 if (!skb_is_gso(skb))
5894 return 0;
Auke Kok9a799d72007-09-15 14:07:45 -07005895
Alexander Duyck897ab152011-05-27 05:31:47 +00005896 if (skb_header_cloned(skb)) {
Alexander Duyck244e27a2012-02-08 07:51:11 +00005897 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
Alexander Duyck897ab152011-05-27 05:31:47 +00005898 if (err)
5899 return err;
Joe Perches7ca647b2010-09-07 21:35:40 +00005900 }
5901
Alexander Duyck897ab152011-05-27 05:31:47 +00005902 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
5903 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
5904
Alexander Duyck244e27a2012-02-08 07:51:11 +00005905 if (first->protocol == __constant_htons(ETH_P_IP)) {
Alexander Duyck897ab152011-05-27 05:31:47 +00005906 struct iphdr *iph = ip_hdr(skb);
5907 iph->tot_len = 0;
5908 iph->check = 0;
5909 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5910 iph->daddr, 0,
5911 IPPROTO_TCP,
5912 0);
5913 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
Alexander Duyck244e27a2012-02-08 07:51:11 +00005914 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
5915 IXGBE_TX_FLAGS_CSUM |
5916 IXGBE_TX_FLAGS_IPV4;
Alexander Duyck897ab152011-05-27 05:31:47 +00005917 } else if (skb_is_gso_v6(skb)) {
5918 ipv6_hdr(skb)->payload_len = 0;
5919 tcp_hdr(skb)->check =
5920 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
5921 &ipv6_hdr(skb)->daddr,
5922 0, IPPROTO_TCP, 0);
Alexander Duyck244e27a2012-02-08 07:51:11 +00005923 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
5924 IXGBE_TX_FLAGS_CSUM;
Alexander Duyck897ab152011-05-27 05:31:47 +00005925 }
5926
Alexander Duyck091a6242012-02-08 07:51:01 +00005927 /* compute header lengths */
Alexander Duyck897ab152011-05-27 05:31:47 +00005928 l4len = tcp_hdrlen(skb);
5929 *hdr_len = skb_transport_offset(skb) + l4len;
5930
Alexander Duyck091a6242012-02-08 07:51:01 +00005931 /* update gso size and bytecount with header size */
5932 first->gso_segs = skb_shinfo(skb)->gso_segs;
5933 first->bytecount += (first->gso_segs - 1) * *hdr_len;
5934
Alexander Duyck897ab152011-05-27 05:31:47 +00005935	/* mss_l4len_idx: use 1 as index for TSO */
5936 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
5937 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
5938 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
5939
5940 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
5941 vlan_macip_lens = skb_network_header_len(skb);
5942 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
Alexander Duyck244e27a2012-02-08 07:51:11 +00005943 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
Alexander Duyck897ab152011-05-27 05:31:47 +00005944
5945 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
Alexander Duyck244e27a2012-02-08 07:51:11 +00005946 mss_l4len_idx);
Alexander Duyck897ab152011-05-27 05:31:47 +00005947
5948 return 1;
Joe Perches7ca647b2010-09-07 21:35:40 +00005949}
5950
Alexander Duyck244e27a2012-02-08 07:51:11 +00005951static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
5952 struct ixgbe_tx_buffer *first)
Auke Kok9a799d72007-09-15 14:07:45 -07005953{
Alexander Duyckfd0db0e2012-02-08 07:50:56 +00005954 struct sk_buff *skb = first->skb;
Alexander Duyck897ab152011-05-27 05:31:47 +00005955 u32 vlan_macip_lens = 0;
5956 u32 mss_l4len_idx = 0;
5957 u32 type_tucmd = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07005958
Alexander Duyck897ab152011-05-27 05:31:47 +00005959 if (skb->ip_summed != CHECKSUM_PARTIAL) {
Alexander Duyck62748b72012-07-20 08:09:01 +00005960 if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN)) {
5961 if (unlikely(skb->no_fcs))
5962 first->tx_flags |= IXGBE_TX_FLAGS_NO_IFCS;
5963 if (!(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
5964 return;
5965 }
Alexander Duyck897ab152011-05-27 05:31:47 +00005966 } else {
5967 u8 l4_hdr = 0;
Alexander Duyck244e27a2012-02-08 07:51:11 +00005968 switch (first->protocol) {
Alexander Duyck897ab152011-05-27 05:31:47 +00005969 case __constant_htons(ETH_P_IP):
5970 vlan_macip_lens |= skb_network_header_len(skb);
5971 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
5972 l4_hdr = ip_hdr(skb)->protocol;
5973 break;
5974 case __constant_htons(ETH_P_IPV6):
5975 vlan_macip_lens |= skb_network_header_len(skb);
5976 l4_hdr = ipv6_hdr(skb)->nexthdr;
5977 break;
5978 default:
5979 if (unlikely(net_ratelimit())) {
5980 dev_warn(tx_ring->dev,
5981 "partial checksum but proto=%x!\n",
Alexander Duyck244e27a2012-02-08 07:51:11 +00005982 first->protocol);
Alexander Duyck897ab152011-05-27 05:31:47 +00005983 }
5984 break;
5985 }
Auke Kok9a799d72007-09-15 14:07:45 -07005986
Alexander Duyck897ab152011-05-27 05:31:47 +00005987 switch (l4_hdr) {
5988 case IPPROTO_TCP:
5989 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
5990 mss_l4len_idx = tcp_hdrlen(skb) <<
5991 IXGBE_ADVTXD_L4LEN_SHIFT;
5992 break;
5993 case IPPROTO_SCTP:
5994 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
5995 mss_l4len_idx = sizeof(struct sctphdr) <<
5996 IXGBE_ADVTXD_L4LEN_SHIFT;
5997 break;
5998 case IPPROTO_UDP:
5999 mss_l4len_idx = sizeof(struct udphdr) <<
6000 IXGBE_ADVTXD_L4LEN_SHIFT;
6001 break;
6002 default:
6003 if (unlikely(net_ratelimit())) {
6004 dev_warn(tx_ring->dev,
6005 "partial checksum but l4 proto=%x!\n",
Alexander Duyck244e27a2012-02-08 07:51:11 +00006006 l4_hdr);
Alexander Duyck897ab152011-05-27 05:31:47 +00006007 }
6008 break;
6009 }
Alexander Duyck244e27a2012-02-08 07:51:11 +00006010
6011 /* update TX checksum flag */
6012 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
Auke Kok9a799d72007-09-15 14:07:45 -07006013 }
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07006014
Alexander Duyck244e27a2012-02-08 07:51:11 +00006015 /* vlan_macip_lens: MACLEN, VLAN tag */
Alexander Duyck897ab152011-05-27 05:31:47 +00006016 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
Alexander Duyck244e27a2012-02-08 07:51:11 +00006017 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
Alexander Duyck897ab152011-05-27 05:31:47 +00006018
6019 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
6020 type_tucmd, mss_l4len_idx);
Auke Kok9a799d72007-09-15 14:07:45 -07006021}
6022
Alexander Duyckd3d00232011-07-15 02:31:25 +00006023static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
6024{
6025 /* set type for advanced descriptor with frame checksum insertion */
6026 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
Alexander Duyckd3d00232011-07-15 02:31:25 +00006027 IXGBE_ADVTXD_DCMD_DEXT);
6028
6029 /* set HW vlan bit if vlan is present */
Alexander Duyck66f32a82011-06-29 05:43:22 +00006030 if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
Alexander Duyckd3d00232011-07-15 02:31:25 +00006031 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
6032
Jacob Keller3a6a4ed2012-05-01 05:24:58 +00006033 if (tx_flags & IXGBE_TX_FLAGS_TSTAMP)
6034 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP);
Jacob Keller3a6a4ed2012-05-01 05:24:58 +00006035
Alexander Duyckd3d00232011-07-15 02:31:25 +00006036 /* set segmentation enable bits for TSO/FSO */
6037#ifdef IXGBE_FCOE
Alexander Duyck93f5b3c2012-02-08 07:50:45 +00006038 if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FSO))
Alexander Duyckd3d00232011-07-15 02:31:25 +00006039#else
6040 if (tx_flags & IXGBE_TX_FLAGS_TSO)
6041#endif
6042 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
6043
Alexander Duyck62748b72012-07-20 08:09:01 +00006044 /* insert frame checksum */
6045 if (!(tx_flags & IXGBE_TX_FLAGS_NO_IFCS))
6046 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS);
6047
Alexander Duyckd3d00232011-07-15 02:31:25 +00006048 return cmd_type;
6049}
6050
Alexander Duyck729739b2012-02-08 07:51:06 +00006051static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
6052 u32 tx_flags, unsigned int paylen)
Alexander Duyckd3d00232011-07-15 02:31:25 +00006053{
Alexander Duyck93f5b3c2012-02-08 07:50:45 +00006054 __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
Alexander Duyckd3d00232011-07-15 02:31:25 +00006055
6056 /* enable L4 checksum for TSO and TX checksum offload */
6057 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
6058 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
6059
Alexander Duyck93f5b3c2012-02-08 07:50:45 +00006060	/* enable IPv4 checksum for TSO */
6061 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
6062 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
Alexander Duyckd3d00232011-07-15 02:31:25 +00006063
Alexander Duyck93f5b3c2012-02-08 07:50:45 +00006064 /* use index 1 context for TSO/FSO/FCOE */
6065#ifdef IXGBE_FCOE
6066 if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FCOE))
6067#else
6068 if (tx_flags & IXGBE_TX_FLAGS_TSO)
Alexander Duyckd3d00232011-07-15 02:31:25 +00006069#endif
Alexander Duyck93f5b3c2012-02-08 07:50:45 +00006070 olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
6071
Alexander Duyck7f9643f2011-06-29 05:43:27 +00006072 /*
6073 * Check Context must be set if Tx switch is enabled, which it
6074	 * always is for the case where virtual functions are running
6075 */
Alexander Duyck93f5b3c2012-02-08 07:50:45 +00006076#ifdef IXGBE_FCOE
6077 if (tx_flags & (IXGBE_TX_FLAGS_TXSW | IXGBE_TX_FLAGS_FCOE))
6078#else
Alexander Duyck7f9643f2011-06-29 05:43:27 +00006079 if (tx_flags & IXGBE_TX_FLAGS_TXSW)
Alexander Duyck93f5b3c2012-02-08 07:50:45 +00006080#endif
Alexander Duyck7f9643f2011-06-29 05:43:27 +00006081 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
6082
Alexander Duyck729739b2012-02-08 07:51:06 +00006083 tx_desc->read.olinfo_status = olinfo_status;
Alexander Duyckd3d00232011-07-15 02:31:25 +00006084}
6085
6086#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
6087 IXGBE_TXD_CMD_RS)
6088
6089static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
Alexander Duyckd3d00232011-07-15 02:31:25 +00006090 struct ixgbe_tx_buffer *first,
Alexander Duyckd3d00232011-07-15 02:31:25 +00006091 const u8 hdr_len)
Auke Kok9a799d72007-09-15 14:07:45 -07006092{
Alexander Duyckd3d00232011-07-15 02:31:25 +00006093 dma_addr_t dma;
Alexander Duyck729739b2012-02-08 07:51:06 +00006094 struct sk_buff *skb = first->skb;
6095 struct ixgbe_tx_buffer *tx_buffer;
6096 union ixgbe_adv_tx_desc *tx_desc;
6097 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
Alexander Duyckd3d00232011-07-15 02:31:25 +00006098 unsigned int data_len = skb->data_len;
6099 unsigned int size = skb_headlen(skb);
Alexander Duyck729739b2012-02-08 07:51:06 +00006100 unsigned int paylen = skb->len - hdr_len;
Alexander Duyck244e27a2012-02-08 07:51:11 +00006101 u32 tx_flags = first->tx_flags;
Alexander Duyck729739b2012-02-08 07:51:06 +00006102 __le32 cmd_type;
Alexander Duyckd3d00232011-07-15 02:31:25 +00006103 u16 i = tx_ring->next_to_use;
Auke Kok9a799d72007-09-15 14:07:45 -07006104
Alexander Duyck729739b2012-02-08 07:51:06 +00006105 tx_desc = IXGBE_TX_DESC(tx_ring, i);
6106
6107 ixgbe_tx_olinfo_status(tx_desc, tx_flags, paylen);
6108 cmd_type = ixgbe_tx_cmd_type(tx_flags);
6109
Alexander Duyckd3d00232011-07-15 02:31:25 +00006110#ifdef IXGBE_FCOE
6111 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
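		/* the FC CRC/EOF trailer is assumed to be inserted by
		 * hardware, so trim it from the length being mapped */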
Alexander Duyck729739b2012-02-08 07:51:06 +00006112 if (data_len < sizeof(struct fcoe_crc_eof)) {
Alexander Duyckd3d00232011-07-15 02:31:25 +00006113 size -= sizeof(struct fcoe_crc_eof) - data_len;
6114 data_len = 0;
Alexander Duyck729739b2012-02-08 07:51:06 +00006115 } else {
6116 data_len -= sizeof(struct fcoe_crc_eof);
Alexander Duyck44df32c2009-03-31 21:34:23 +00006117 }
Auke Kok9a799d72007-09-15 14:07:45 -07006118 }
6119
Alexander Duyckd3d00232011-07-15 02:31:25 +00006120#endif
Alexander Duyck729739b2012-02-08 07:51:06 +00006121 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
6122 if (dma_mapping_error(tx_ring->dev, dma))
Alexander Duyckd3d00232011-07-15 02:31:25 +00006123 goto dma_error;
6124
Alexander Duyck729739b2012-02-08 07:51:06 +00006125 /* record length, and DMA address */
6126 dma_unmap_len_set(first, len, size);
6127 dma_unmap_addr_set(first, dma, dma);
Alexander Duyckd3d00232011-07-15 02:31:25 +00006128
Alexander Duyck729739b2012-02-08 07:51:06 +00006129 tx_desc->read.buffer_addr = cpu_to_le64(dma);
Alexander Duyckd3d00232011-07-15 02:31:25 +00006130
6131 for (;;) {
Alexander Duyck729739b2012-02-08 07:51:06 +00006132 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
Alexander Duyckd3d00232011-07-15 02:31:25 +00006133 tx_desc->read.cmd_type_len =
6134 cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
Alexander Duyckd3d00232011-07-15 02:31:25 +00006135
Alexander Duyckd3d00232011-07-15 02:31:25 +00006136 i++;
Alexander Duyck729739b2012-02-08 07:51:06 +00006137 tx_desc++;
Alexander Duyckd3d00232011-07-15 02:31:25 +00006138 if (i == tx_ring->count) {
Alexander Duycke4f74022012-01-31 02:59:44 +00006139 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
Alexander Duyckd3d00232011-07-15 02:31:25 +00006140 i = 0;
6141 }
Alexander Duyck729739b2012-02-08 07:51:06 +00006142
6143 dma += IXGBE_MAX_DATA_PER_TXD;
6144 size -= IXGBE_MAX_DATA_PER_TXD;
6145
6146 tx_desc->read.buffer_addr = cpu_to_le64(dma);
6147 tx_desc->read.olinfo_status = 0;
Alexander Duyckd3d00232011-07-15 02:31:25 +00006148 }
6149
Alexander Duyck729739b2012-02-08 07:51:06 +00006150 if (likely(!data_len))
6151 break;
Alexander Duyckd3d00232011-07-15 02:31:25 +00006152
Alexander Duyckd3d00232011-07-15 02:31:25 +00006153 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
Alexander Duyckd3d00232011-07-15 02:31:25 +00006154
Alexander Duyck729739b2012-02-08 07:51:06 +00006155 i++;
6156 tx_desc++;
6157 if (i == tx_ring->count) {
6158 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
6159 i = 0;
6160 }
Auke Kok9a799d72007-09-15 14:07:45 -07006161
Alexander Duyckd3d00232011-07-15 02:31:25 +00006162#ifdef IXGBE_FCOE
Eric Dumazet9e903e02011-10-18 21:00:24 +00006163 size = min_t(unsigned int, data_len, skb_frag_size(frag));
Alexander Duyckd3d00232011-07-15 02:31:25 +00006164#else
Eric Dumazet9e903e02011-10-18 21:00:24 +00006165 size = skb_frag_size(frag);
Alexander Duyckd3d00232011-07-15 02:31:25 +00006166#endif
6167 data_len -= size;
Auke Kok9a799d72007-09-15 14:07:45 -07006168
Alexander Duyck729739b2012-02-08 07:51:06 +00006169 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
6170 DMA_TO_DEVICE);
6171 if (dma_mapping_error(tx_ring->dev, dma))
Alexander Duyckd3d00232011-07-15 02:31:25 +00006172 goto dma_error;
Auke Kok9a799d72007-09-15 14:07:45 -07006173
Alexander Duyck729739b2012-02-08 07:51:06 +00006174 tx_buffer = &tx_ring->tx_buffer_info[i];
6175 dma_unmap_len_set(tx_buffer, len, size);
6176 dma_unmap_addr_set(tx_buffer, dma, dma);
6177
6178 tx_desc->read.buffer_addr = cpu_to_le64(dma);
6179 tx_desc->read.olinfo_status = 0;
6180
6181 frag++;
Auke Kok9a799d72007-09-15 14:07:45 -07006182 }
Alexander Duyck44df32c2009-03-31 21:34:23 +00006183
Alexander Duyck729739b2012-02-08 07:51:06 +00006184 /* write last descriptor with RS and EOP bits */
6185 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
6186 tx_desc->read.cmd_type_len = cmd_type;
Alexander Duyckd3d00232011-07-15 02:31:25 +00006187
Alexander Duyck091a6242012-02-08 07:51:01 +00006188 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
Alexander Duyckb2d96e02012-02-07 08:14:33 +00006189
Alexander Duyckd3d00232011-07-15 02:31:25 +00006190 /* set the timestamp */
6191 first->time_stamp = jiffies;
Auke Kok9a799d72007-09-15 14:07:45 -07006192
6193 /*
Alexander Duyck729739b2012-02-08 07:51:06 +00006194 * Force memory writes to complete before letting h/w know there
6195 * are new descriptors to fetch. (Only applicable for weak-ordered
6196 * memory model archs, such as IA-64).
6197 *
6198 * We also need this memory barrier to make certain all of the
6199 * status bits have been updated before next_to_watch is written.
Auke Kok9a799d72007-09-15 14:07:45 -07006200 */
6201 wmb();
6202
Alexander Duyckd3d00232011-07-15 02:31:25 +00006203 /* set next_to_watch value indicating a packet is present */
6204 first->next_to_watch = tx_desc;
6205
Alexander Duyck729739b2012-02-08 07:51:06 +00006206 i++;
6207 if (i == tx_ring->count)
6208 i = 0;
6209
6210 tx_ring->next_to_use = i;
6211
Alexander Duyckd3d00232011-07-15 02:31:25 +00006212 /* notify HW of packet */
Alexander Duyck84ea2592010-11-16 19:26:49 -08006213 writel(i, tx_ring->tail);
Alexander Duyckd3d00232011-07-15 02:31:25 +00006214
6215 return;
6216dma_error:
Alexander Duyck729739b2012-02-08 07:51:06 +00006217 dev_err(tx_ring->dev, "TX DMA map failed\n");
Alexander Duyckd3d00232011-07-15 02:31:25 +00006218
6219 /* clear dma mappings for failed tx_buffer_info map */
6220 for (;;) {
Alexander Duyck729739b2012-02-08 07:51:06 +00006221 tx_buffer = &tx_ring->tx_buffer_info[i];
6222 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
6223 if (tx_buffer == first)
Alexander Duyckd3d00232011-07-15 02:31:25 +00006224 break;
6225 if (i == 0)
6226 i = tx_ring->count;
6227 i--;
6228 }
6229
Alexander Duyckd3d00232011-07-15 02:31:25 +00006230 tx_ring->next_to_use = i;
Auke Kok9a799d72007-09-15 14:07:45 -07006231}
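/*
 * Illustrative sketch (not part of the driver): how ixgbe_tx_map() above
 * slices one mapped buffer into ring descriptors of at most
 * IXGBE_MAX_DATA_PER_TXD bytes while wrapping the ring index. The
 * standalone types, the 16K limit, and the printf reporting are
 * assumptions made purely for this example.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_MAX_DATA_PER_TXD (1u << 14)	/* stand-in for IXGBE_MAX_DATA_PER_TXD */

static uint16_t ex_fill_data_descs(uint64_t dma, uint32_t size,
				   uint16_t i, uint16_t ring_count)
{
	/* emit full-size descriptors until the remainder fits in one */
	while (size > EX_MAX_DATA_PER_TXD) {
		printf("desc %u: addr=0x%llx len=%u\n", (unsigned)i,
		       (unsigned long long)dma, EX_MAX_DATA_PER_TXD);
		dma += EX_MAX_DATA_PER_TXD;
		size -= EX_MAX_DATA_PER_TXD;
		if (++i == ring_count)	/* wrap to the start of the ring */
			i = 0;
	}
	printf("desc %u: addr=0x%llx len=%u (final chunk)\n", (unsigned)i,
	       (unsigned long long)dma, size);
	return i;	/* index of the descriptor that would get RS|EOP */
}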
6232
Alexander Duyckfd0db0e2012-02-08 07:50:56 +00006233static void ixgbe_atr(struct ixgbe_ring *ring,
Alexander Duyck244e27a2012-02-08 07:51:11 +00006234 struct ixgbe_tx_buffer *first)
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00006235{
Alexander Duyck69830522011-01-06 14:29:58 +00006236 struct ixgbe_q_vector *q_vector = ring->q_vector;
6237 union ixgbe_atr_hash_dword input = { .dword = 0 };
6238 union ixgbe_atr_hash_dword common = { .dword = 0 };
6239 union {
6240 unsigned char *network;
6241 struct iphdr *ipv4;
6242 struct ipv6hdr *ipv6;
6243 } hdr;
Alexander Duyckee9e0f02010-11-16 19:27:01 -08006244 struct tcphdr *th;
Alexander Duyck905e4a42011-01-06 14:29:57 +00006245 __be16 vlan_id;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00006246
Alexander Duyck69830522011-01-06 14:29:58 +00006247	/* if ring doesn't have an interrupt vector, cannot perform ATR */
6248 if (!q_vector)
Guillaume Gaudonvilled3ead242010-06-29 18:29:00 +00006249 return;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00006250
Alexander Duyck69830522011-01-06 14:29:58 +00006251 /* do nothing if sampling is disabled */
6252 if (!ring->atr_sample_rate)
6253 return;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00006254
Alexander Duyck69830522011-01-06 14:29:58 +00006255 ring->atr_count++;
6256
6257 /* snag network header to get L4 type and address */
Alexander Duyckfd0db0e2012-02-08 07:50:56 +00006258 hdr.network = skb_network_header(first->skb);
Alexander Duyck69830522011-01-06 14:29:58 +00006259
6260 /* Currently only IPv4/IPv6 with TCP is supported */
Alexander Duyck244e27a2012-02-08 07:51:11 +00006261 if ((first->protocol != __constant_htons(ETH_P_IPV6) ||
Alexander Duyck69830522011-01-06 14:29:58 +00006262 hdr.ipv6->nexthdr != IPPROTO_TCP) &&
Alexander Duyck244e27a2012-02-08 07:51:11 +00006263 (first->protocol != __constant_htons(ETH_P_IP) ||
Alexander Duyck69830522011-01-06 14:29:58 +00006264 hdr.ipv4->protocol != IPPROTO_TCP))
6265 return;
Alexander Duyckee9e0f02010-11-16 19:27:01 -08006266
Alexander Duyckfd0db0e2012-02-08 07:50:56 +00006267 th = tcp_hdr(first->skb);
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00006268
Alexander Duyck66f32a82011-06-29 05:43:22 +00006269 /* skip this packet since it is invalid or the socket is closing */
6270 if (!th || th->fin)
Alexander Duyck69830522011-01-06 14:29:58 +00006271 return;
6272
6273 /* sample on all syn packets or once every atr sample count */
6274 if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
6275 return;
6276
6277 /* reset sample count */
6278 ring->atr_count = 0;
6279
Alexander Duyck244e27a2012-02-08 07:51:11 +00006280 vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
Alexander Duyck69830522011-01-06 14:29:58 +00006281
6282 /*
 6283	 * src and dst are inverted; think of how the receiver sees them
6284 *
6285 * The input is broken into two sections, a non-compressed section
6286 * containing vm_pool, vlan_id, and flow_type. The rest of the data
6287 * is XORed together and stored in the compressed dword.
6288 */
6289 input.formatted.vlan_id = vlan_id;
6290
6291 /*
 6292	 * since the src port and flex bytes occupy the same word, XOR them together
 6293	 * and write the value to the source port portion of the compressed dword
6294 */
Alexander Duyck244e27a2012-02-08 07:51:11 +00006295 if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
Alexander Duyck69830522011-01-06 14:29:58 +00006296 common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
6297 else
Alexander Duyck244e27a2012-02-08 07:51:11 +00006298 common.port.src ^= th->dest ^ first->protocol;
Alexander Duyck69830522011-01-06 14:29:58 +00006299 common.port.dst ^= th->source;
6300
Alexander Duyck244e27a2012-02-08 07:51:11 +00006301 if (first->protocol == __constant_htons(ETH_P_IP)) {
Alexander Duyck69830522011-01-06 14:29:58 +00006302 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
6303 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
6304 } else {
6305 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
6306 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
6307 hdr.ipv6->saddr.s6_addr32[1] ^
6308 hdr.ipv6->saddr.s6_addr32[2] ^
6309 hdr.ipv6->saddr.s6_addr32[3] ^
6310 hdr.ipv6->daddr.s6_addr32[0] ^
6311 hdr.ipv6->daddr.s6_addr32[1] ^
6312 hdr.ipv6->daddr.s6_addr32[2] ^
6313 hdr.ipv6->daddr.s6_addr32[3];
6314 }
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00006315
6316 /* This assumes the Rx queue and Tx queue are bound to the same CPU */
Alexander Duyck69830522011-01-06 14:29:58 +00006317 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
6318 input, common, ring->queue_index);
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00006319}
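/*
 * Illustrative sketch (not part of the driver): the XOR fold ixgbe_atr()
 * uses to compress an IPv6 source/destination pair into the single 32-bit
 * "common" word of the signature filter. Reading the addresses as arrays
 * of four 32-bit words is an assumption for the example; the driver pulls
 * them straight out of struct ipv6hdr.
 */
#include <stdint.h>

static uint32_t ex_atr_fold_ipv6(const uint32_t saddr[4],
				 const uint32_t daddr[4])
{
	uint32_t ip = 0;
	int w;

	/* XOR is order-independent, so all eight words fold into one */
	for (w = 0; w < 4; w++)
		ip ^= saddr[w] ^ daddr[w];
	return ip;
}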
6320
Alexander Duyck63544e92011-05-27 05:31:42 +00006321static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08006322{
Alexander Duyckfc77dc32010-11-16 19:26:51 -08006323 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08006324 /* Herbert's original patch had:
6325 * smp_mb__after_netif_stop_queue();
6326 * but since that doesn't exist yet, just open code it. */
6327 smp_mb();
6328
 6329	/* We need to check again in case another CPU has just
6330 * made room available. */
Alexander Duyck7d4987d2011-05-27 05:31:37 +00006331 if (likely(ixgbe_desc_unused(tx_ring) < size))
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08006332 return -EBUSY;
6333
6334 /* A reprieve! - use start_queue because it doesn't call schedule */
Alexander Duyckfc77dc32010-11-16 19:26:51 -08006335 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
Alexander Duyck5b7da512010-11-16 19:26:50 -08006336 ++tx_ring->tx_stats.restart_queue;
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08006337 return 0;
6338}
6339
Alexander Duyck82d4e462011-06-11 01:44:58 +00006340static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08006341{
Alexander Duyck7d4987d2011-05-27 05:31:37 +00006342 if (likely(ixgbe_desc_unused(tx_ring) >= size))
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08006343 return 0;
Alexander Duyckfc77dc32010-11-16 19:26:51 -08006344 return __ixgbe_maybe_stop_tx(tx_ring, size);
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08006345}
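/*
 * Illustrative sketch (not part of the driver): the free-descriptor count
 * behind the ixgbe_desc_unused() checks above, written out for a generic
 * producer/consumer ring. One slot is deliberately left unused so that
 * next_to_use == next_to_clean always means "empty", never "full".
 */
#include <stdint.h>

static inline uint16_t ex_desc_unused(uint16_t next_to_clean,
				      uint16_t next_to_use, uint16_t count)
{
	return ((next_to_clean > next_to_use) ? 0 : count) +
	       next_to_clean - next_to_use - 1;
}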
6346
Stephen Hemminger09a3b1f2009-03-21 13:40:01 -07006347static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6348{
6349 struct ixgbe_adapter *adapter = netdev_priv(dev);
Alexander Duyck64407522011-06-11 01:44:53 +00006350 int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
6351 smp_processor_id();
John Fastabend56075a92010-07-26 20:41:31 +00006352#ifdef IXGBE_FCOE
Alexander Duyck64407522011-06-11 01:44:53 +00006353 __be16 protocol = vlan_get_protocol(skb);
Hao Zheng5e09a102010-11-11 13:47:59 +00006354
John Fastabende5b64632011-03-08 03:44:52 +00006355 if (((protocol == htons(ETH_P_FCOE)) ||
6356 (protocol == htons(ETH_P_FIP))) &&
6357 (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
Alexander Duyckc0876632012-05-10 00:01:46 +00006358 struct ixgbe_ring_feature *f;
6359
6360 f = &adapter->ring_feature[RING_F_FCOE];
6361
6362 while (txq >= f->indices)
6363 txq -= f->indices;
Alexander Duycke4b317e2012-05-05 05:30:53 +00006364 txq += adapter->ring_feature[RING_F_FCOE].offset;
Alexander Duyckc0876632012-05-10 00:01:46 +00006365
John Fastabende5b64632011-03-08 03:44:52 +00006366 return txq;
John Fastabend56075a92010-07-26 20:41:31 +00006367 }
6368#endif
6369
Krishna Kumarfdd3d632010-02-03 13:13:10 +00006370 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
6371 while (unlikely(txq >= dev->real_num_tx_queues))
6372 txq -= dev->real_num_tx_queues;
Yi Zou5f715822009-12-03 11:32:44 +00006373 return txq;
Krishna Kumarfdd3d632010-02-03 13:13:10 +00006374 }
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00006375
Stephen Hemminger09a3b1f2009-03-21 13:40:01 -07006376 return skb_tx_hash(dev, skb);
6377}
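/*
 * Illustrative sketch (not part of the driver): the queue fold performed
 * by the while loop in ixgbe_select_queue() above; reduce the recorded
 * queue modulo the FCoE ring count, then shift it into the FCoE region.
 * The helper name and the plain modulo are assumptions for the example.
 */
static inline unsigned int ex_fcoe_txq(unsigned int txq,
				       unsigned int indices,
				       unsigned int offset)
{
	return txq % indices + offset;
}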
6378
Alexander Duyckfc77dc32010-11-16 19:26:51 -08006379netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
Alexander Duyck84418e32010-08-19 13:40:54 +00006380 struct ixgbe_adapter *adapter,
6381 struct ixgbe_ring *tx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07006382{
Alexander Duyckd3d00232011-07-15 02:31:25 +00006383 struct ixgbe_tx_buffer *first;
Yi Zou5f715822009-12-03 11:32:44 +00006384 int tso;
Alexander Duyckd3d00232011-07-15 02:31:25 +00006385 u32 tx_flags = 0;
Alexander Duycka535c302011-05-27 05:31:52 +00006386#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
6387 unsigned short f;
6388#endif
Alexander Duycka535c302011-05-27 05:31:52 +00006389 u16 count = TXD_USE_COUNT(skb_headlen(skb));
Alexander Duyck66f32a82011-06-29 05:43:22 +00006390 __be16 protocol = skb->protocol;
Alexander Duyck63544e92011-05-27 05:31:42 +00006391 u8 hdr_len = 0;
Hao Zheng5e09a102010-11-11 13:47:59 +00006392
Alexander Duycka535c302011-05-27 05:31:52 +00006393 /*
6394 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
Alexander Duyck24ddd962012-02-10 02:08:32 +00006395 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
Alexander Duycka535c302011-05-27 05:31:52 +00006396 * + 2 desc gap to keep tail from touching head,
6397 * + 1 desc for context descriptor,
6398 * otherwise try next time
6399 */
6400#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
6401 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6402 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
6403#else
6404 count += skb_shinfo(skb)->nr_frags;
6405#endif
6406 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
6407 tx_ring->tx_stats.tx_busy++;
6408 return NETDEV_TX_BUSY;
6409 }
6410
Alexander Duyckfd0db0e2012-02-08 07:50:56 +00006411 /* record the location of the first descriptor for this packet */
6412 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
6413 first->skb = skb;
Alexander Duyck091a6242012-02-08 07:51:01 +00006414 first->bytecount = skb->len;
6415 first->gso_segs = 1;
Alexander Duyckfd0db0e2012-02-08 07:50:56 +00006416
Alexander Duyck66f32a82011-06-29 05:43:22 +00006417 /* if we have a HW VLAN tag being added default to the HW one */
Jesse Grosseab6d182010-10-20 13:56:03 +00006418 if (vlan_tx_tag_present(skb)) {
Alexander Duyck66f32a82011-06-29 05:43:22 +00006419 tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
6420 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
 6421	/* else if it is a SW VLAN, check the next protocol and store the tag */
6422 } else if (protocol == __constant_htons(ETH_P_8021Q)) {
6423 struct vlan_hdr *vhdr, _vhdr;
6424 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
6425 if (!vhdr)
6426 goto out_drop;
6427
6428 protocol = vhdr->h_vlan_encapsulated_proto;
Alexander Duyck9e0c5642012-02-08 07:49:33 +00006429 tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
6430 IXGBE_TX_FLAGS_VLAN_SHIFT;
Alexander Duyck66f32a82011-06-29 05:43:22 +00006431 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
Auke Kok9a799d72007-09-15 14:07:45 -07006432 }
Yi Zoueacd73f2009-05-13 13:11:06 +00006433
Jacob Kelleraa7bd462012-05-04 01:55:23 +00006434 skb_tx_timestamp(skb);
6435
Jacob Keller3a6a4ed2012-05-01 05:24:58 +00006436 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
6437 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
6438 tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
6439 }
Jacob Keller3a6a4ed2012-05-01 05:24:58 +00006440
Alexander Duyck9e0c5642012-02-08 07:49:33 +00006441#ifdef CONFIG_PCI_IOV
6442 /*
6443 * Use the l2switch_enable flag - would be false if the DMA
6444 * Tx switch had been disabled.
6445 */
6446 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6447 tx_flags |= IXGBE_TX_FLAGS_TXSW;
6448
6449#endif
John Fastabend32701dc2011-09-27 03:51:56 +00006450	/* DCB maps skb priorities 0-7 onto the 3-bit PCP of the VLAN tag. */
Alexander Duyck66f32a82011-06-29 05:43:22 +00006451 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
Alexander Duyck09dca472011-07-20 00:09:10 +00006452 ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
6453 (skb->priority != TC_PRIO_CONTROL))) {
Alexander Duyck66f32a82011-06-29 05:43:22 +00006454 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
John Fastabend32701dc2011-09-27 03:51:56 +00006455 tx_flags |= (skb->priority & 0x7) <<
6456 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
Alexander Duyck66f32a82011-06-29 05:43:22 +00006457 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
6458 struct vlan_ethhdr *vhdr;
6459 if (skb_header_cloned(skb) &&
6460 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6461 goto out_drop;
6462 vhdr = (struct vlan_ethhdr *)skb->data;
6463 vhdr->h_vlan_TCI = htons(tx_flags >>
6464 IXGBE_TX_FLAGS_VLAN_SHIFT);
6465 } else {
6466 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
6467 }
6468 }
Alexander Duycka535c302011-05-27 05:31:52 +00006469
Alexander Duyck244e27a2012-02-08 07:51:11 +00006470 /* record initial flags and protocol */
6471 first->tx_flags = tx_flags;
6472 first->protocol = protocol;
6473
Yi Zoueacd73f2009-05-13 13:11:06 +00006474#ifdef IXGBE_FCOE
Alexander Duyck66f32a82011-06-29 05:43:22 +00006475 /* setup tx offload for FCoE */
6476 if ((protocol == __constant_htons(ETH_P_FCOE)) &&
Alexander Duycka58915c2012-05-25 06:38:18 +00006477 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
Alexander Duyck244e27a2012-02-08 07:51:11 +00006478 tso = ixgbe_fso(tx_ring, first, &hdr_len);
Alexander Duyck897ab152011-05-27 05:31:47 +00006479 if (tso < 0)
6480 goto out_drop;
Auke Kok9a799d72007-09-15 14:07:45 -07006481
Alexander Duyck66f32a82011-06-29 05:43:22 +00006482 goto xmit_fcoe;
Alexander Duyck44df32c2009-03-31 21:34:23 +00006483 }
Auke Kok9a799d72007-09-15 14:07:45 -07006484
Auke Kok9a799d72007-09-15 14:07:45 -07006485#endif /* IXGBE_FCOE */
Alexander Duyck244e27a2012-02-08 07:51:11 +00006486 tso = ixgbe_tso(tx_ring, first, &hdr_len);
Alexander Duyck66f32a82011-06-29 05:43:22 +00006487 if (tso < 0)
Auke Kok9a799d72007-09-15 14:07:45 -07006488 goto out_drop;
Alexander Duyck244e27a2012-02-08 07:51:11 +00006489 else if (!tso)
6490 ixgbe_tx_csum(tx_ring, first);
Alexander Duyck66f32a82011-06-29 05:43:22 +00006491
6492 /* add the ATR filter if ATR is on */
6493 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
Alexander Duyck244e27a2012-02-08 07:51:11 +00006494 ixgbe_atr(tx_ring, first);
Alexander Duyck66f32a82011-06-29 05:43:22 +00006495
6496#ifdef IXGBE_FCOE
6497xmit_fcoe:
6498#endif /* IXGBE_FCOE */
Alexander Duyck244e27a2012-02-08 07:51:11 +00006499 ixgbe_tx_map(tx_ring, first, hdr_len);
Alexander Duyckd3d00232011-07-15 02:31:25 +00006500
6501 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
Auke Kok9a799d72007-09-15 14:07:45 -07006502
6503 return NETDEV_TX_OK;
Alexander Duyck897ab152011-05-27 05:31:47 +00006504
6505out_drop:
Alexander Duyckfd0db0e2012-02-08 07:50:56 +00006506 dev_kfree_skb_any(first->skb);
6507 first->skb = NULL;
6508
Alexander Duyck897ab152011-05-27 05:31:47 +00006509 return NETDEV_TX_OK;
Auke Kok9a799d72007-09-15 14:07:45 -07006510}
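/*
 * Illustrative sketch (not part of the driver): the descriptor budget
 * computed at the top of ixgbe_xmit_frame_ring(). TXD_USE_COUNT()-style
 * rounding is reproduced as a plain ceiling division; the 16K limit and
 * the worked numbers are assumptions for the example.
 */
#define EX_TXD_LIMIT (1u << 14)

static unsigned int ex_txd_use_count(unsigned int len)
{
	return (len + EX_TXD_LIMIT - 1) / EX_TXD_LIMIT;
}

/*
 * Example: a 1500-byte head plus two 32K frags needs
 * 1 + 2 + 2 = 5 data descriptors; add 1 context descriptor and the
 * 2-descriptor gap and the ring must have 8 free slots (count + 3).
 */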
6511
Alexander Duycka50c29d2012-02-08 07:50:40 +00006512static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
6513 struct net_device *netdev)
Auke Kok9a799d72007-09-15 14:07:45 -07006514{
6515 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07006516 struct ixgbe_ring *tx_ring;
Auke Kok9a799d72007-09-15 14:07:45 -07006517
Alexander Duycka50c29d2012-02-08 07:50:40 +00006518 /*
 6519	 * The minimum packet size for olinfo paylen is 17, so pad the skb
6520 * in order to meet this minimum size requirement.
6521 */
Stephen Hemmingerf73332f2012-06-21 02:15:10 +00006522 if (unlikely(skb->len < 17)) {
6523 if (skb_pad(skb, 17 - skb->len))
Alexander Duycka50c29d2012-02-08 07:50:40 +00006524 return NETDEV_TX_OK;
6525 skb->len = 17;
Tushar Dave71a49f72012-09-14 04:24:49 +00006526 skb_set_tail_pointer(skb, 17);
Alexander Duycka50c29d2012-02-08 07:50:40 +00006527 }
6528
Auke Kok9a799d72007-09-15 14:07:45 -07006529 tx_ring = adapter->tx_ring[skb->queue_mapping];
6530 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
6531}
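/*
 * Illustrative sketch (not part of the driver): the clamp implied by the
 * 17-byte check in ixgbe_xmit_frame() above; anything shorter must grow
 * to the minimum olinfo paylen. The helper is an assumption for the
 * example, and the driver lets skb_pad() do the actual work.
 */
static inline unsigned int ex_pad_needed(unsigned int len)
{
	return len < 17 ? 17 - len : 0;
}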
6532
6533/**
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07006534 * ixgbe_set_mac - Change the Ethernet Address of the NIC
Auke Kok9a799d72007-09-15 14:07:45 -07006535 * @netdev: network interface device structure
Greg Rose1cdd1ec2010-01-09 02:26:46 +00006536 * @p: pointer to an address structure
6537 *
Auke Kok9a799d72007-09-15 14:07:45 -07006538 * Returns 0 on success, negative on failure
6539 **/
6540static int ixgbe_set_mac(struct net_device *netdev, void *p)
6541{
Ben Hutchings6b73e102009-04-29 08:08:58 +00006542 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6543 struct ixgbe_hw *hw = &adapter->hw;
6544 struct sockaddr *addr = p;
6545
6546 if (!is_valid_ether_addr(addr->sa_data))
6547 return -EADDRNOTAVAIL;
6548
6549 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
6550 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
6551
Alexander Duyck1d9c0bf2012-05-05 05:32:21 +00006552 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
Auke Kok9a799d72007-09-15 14:07:45 -07006553
6554 return 0;
6555}
6556
Ben Hutchings6b73e102009-04-29 08:08:58 +00006557static int
6558ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
6559{
6560 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6561 struct ixgbe_hw *hw = &adapter->hw;
6562 u16 value;
6563 int rc;
6564
6565 if (prtad != hw->phy.mdio.prtad)
6566 return -EINVAL;
6567 rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
6568 if (!rc)
6569 rc = value;
6570 return rc;
6571}
6572
6573static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
6574 u16 addr, u16 value)
6575{
6576 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6577 struct ixgbe_hw *hw = &adapter->hw;
6578
6579 if (prtad != hw->phy.mdio.prtad)
6580 return -EINVAL;
6581 return hw->phy.ops.write_reg(hw, addr, devad, value);
6582}
6583
6584static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
6585{
6586 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6587
Jacob Keller3a6a4ed2012-05-01 05:24:58 +00006588 switch (cmd) {
Jacob Keller3a6a4ed2012-05-01 05:24:58 +00006589 case SIOCSHWTSTAMP:
6590 return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd);
Jacob Keller3a6a4ed2012-05-01 05:24:58 +00006591 default:
6592 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
6593 }
Ben Hutchings6b73e102009-04-29 08:08:58 +00006594}
6595
PJ Waskiewicz0365e6e2009-05-17 12:32:25 +00006596/**
6597 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
Jiri Pirko31278e72009-06-17 01:12:19 +00006598 * netdev->dev_addrs
PJ Waskiewicz0365e6e2009-05-17 12:32:25 +00006599 * @netdev: network interface device structure
6600 *
6601 * Returns non-zero on failure
6602 **/
6603static int ixgbe_add_sanmac_netdev(struct net_device *dev)
6604{
6605 int err = 0;
6606 struct ixgbe_adapter *adapter = netdev_priv(dev);
Alexander Duyck7fa7c9d2012-05-05 05:32:52 +00006607 struct ixgbe_hw *hw = &adapter->hw;
PJ Waskiewicz0365e6e2009-05-17 12:32:25 +00006608
Alexander Duyck7fa7c9d2012-05-05 05:32:52 +00006609 if (is_valid_ether_addr(hw->mac.san_addr)) {
PJ Waskiewicz0365e6e2009-05-17 12:32:25 +00006610 rtnl_lock();
Alexander Duyck7fa7c9d2012-05-05 05:32:52 +00006611 err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
PJ Waskiewicz0365e6e2009-05-17 12:32:25 +00006612 rtnl_unlock();
Alexander Duyck7fa7c9d2012-05-05 05:32:52 +00006613
6614 /* update SAN MAC vmdq pool selection */
6615 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
PJ Waskiewicz0365e6e2009-05-17 12:32:25 +00006616 }
6617 return err;
6618}
6619
6620/**
 6621 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
Jiri Pirko31278e72009-06-17 01:12:19 +00006622 * netdev->dev_addrs
PJ Waskiewicz0365e6e2009-05-17 12:32:25 +00006623 * @netdev: network interface device structure
6624 *
6625 * Returns non-zero on failure
6626 **/
6627static int ixgbe_del_sanmac_netdev(struct net_device *dev)
6628{
6629 int err = 0;
6630 struct ixgbe_adapter *adapter = netdev_priv(dev);
6631 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6632
6633 if (is_valid_ether_addr(mac->san_addr)) {
6634 rtnl_lock();
6635 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
6636 rtnl_unlock();
6637 }
6638 return err;
6639}
6640
Auke Kok9a799d72007-09-15 14:07:45 -07006641#ifdef CONFIG_NET_POLL_CONTROLLER
6642/*
6643 * Polling 'interrupt' - used by things like netconsole to send skbs
6644 * without having to re-enable interrupts. It's not called while
6645 * the interrupt routine is executing.
6646 */
6647static void ixgbe_netpoll(struct net_device *netdev)
6648{
6649 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Peter P Waskiewicz Jr8f9a7162009-07-30 12:25:09 +00006650 int i;
Auke Kok9a799d72007-09-15 14:07:45 -07006651
Alexander Duyck1a647bd2010-01-13 01:49:13 +00006652 /* if interface is down do nothing */
6653 if (test_bit(__IXGBE_DOWN, &adapter->state))
6654 return;
6655
Auke Kok9a799d72007-09-15 14:07:45 -07006656 adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
Peter P Waskiewicz Jr8f9a7162009-07-30 12:25:09 +00006657 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
Alexander Duyck49c7ffb2012-05-05 05:30:43 +00006658 for (i = 0; i < adapter->num_q_vectors; i++)
6659 ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
Peter P Waskiewicz Jr8f9a7162009-07-30 12:25:09 +00006660 } else {
6661 ixgbe_intr(adapter->pdev->irq, netdev);
6662 }
Auke Kok9a799d72007-09-15 14:07:45 -07006663 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
Auke Kok9a799d72007-09-15 14:07:45 -07006664}
Auke Kok9a799d72007-09-15 14:07:45 -07006665
Alexander Duyck581330b2012-02-08 07:51:47 +00006666#endif
Eric Dumazetde1036b2010-10-20 23:00:04 +00006667static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
6668 struct rtnl_link_stats64 *stats)
6669{
6670 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6671 int i;
6672
Eric Dumazet1a515022010-11-16 19:26:42 -08006673 rcu_read_lock();
Eric Dumazetde1036b2010-10-20 23:00:04 +00006674 for (i = 0; i < adapter->num_rx_queues; i++) {
Eric Dumazet1a515022010-11-16 19:26:42 -08006675 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
Eric Dumazetde1036b2010-10-20 23:00:04 +00006676 u64 bytes, packets;
6677 unsigned int start;
6678
Eric Dumazet1a515022010-11-16 19:26:42 -08006679 if (ring) {
6680 do {
6681 start = u64_stats_fetch_begin_bh(&ring->syncp);
6682 packets = ring->stats.packets;
6683 bytes = ring->stats.bytes;
6684 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
6685 stats->rx_packets += packets;
6686 stats->rx_bytes += bytes;
6687 }
Eric Dumazetde1036b2010-10-20 23:00:04 +00006688 }
Eric Dumazet1ac9ad12011-01-12 12:13:14 +00006689
6690 for (i = 0; i < adapter->num_tx_queues; i++) {
6691 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
6692 u64 bytes, packets;
6693 unsigned int start;
6694
6695 if (ring) {
6696 do {
6697 start = u64_stats_fetch_begin_bh(&ring->syncp);
6698 packets = ring->stats.packets;
6699 bytes = ring->stats.bytes;
6700 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
6701 stats->tx_packets += packets;
6702 stats->tx_bytes += bytes;
6703 }
6704 }
Eric Dumazet1a515022010-11-16 19:26:42 -08006705 rcu_read_unlock();
Eric Dumazetde1036b2010-10-20 23:00:04 +00006706 /* following stats updated by ixgbe_watchdog_task() */
6707 stats->multicast = netdev->stats.multicast;
6708 stats->rx_errors = netdev->stats.rx_errors;
6709 stats->rx_length_errors = netdev->stats.rx_length_errors;
6710 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
6711 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
6712 return stats;
6713}
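/*
 * Illustrative sketch (not part of the driver): the retry loop used by
 * ixgbe_get_stats64() above, modeled as a plain sequence counter. A
 * writer makes the counter odd while updating; a reader retries until it
 * sees the same even value before and after the snapshot, so 64-bit
 * counters stay consistent on 32-bit SMP. This stands in for
 * u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() and ignores the
 * memory barriers the real primitives provide.
 */
#include <stdint.h>

struct ex_ring_stats {
	unsigned int seq;	/* even: stable, odd: update in progress */
	uint64_t packets;
	uint64_t bytes;
};

static void ex_read_stats(const volatile struct ex_ring_stats *s,
			  uint64_t *packets, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = s->seq;
		*packets = s->packets;
		*bytes = s->bytes;
	} while ((start & 1) || start != s->seq);	/* raced: retry */
}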
6714
Jeff Kirsher8af3c332012-02-18 07:08:14 +00006715#ifdef CONFIG_IXGBE_DCB
Ben Hutchings49ce9c22012-07-10 10:56:00 +00006716/**
6717 * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
6718 * @adapter: pointer to ixgbe_adapter
John Fastabend8b1c0b22011-05-03 02:26:48 +00006719 * @tc: number of traffic classes currently enabled
6720 *
 6721 * Configure a valid 802.1Qp to Rx packet buffer mapping, i.e. confirm
 6722 * that each 802.1Q priority maps to a packet buffer that exists.
6723 */
6724static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
6725{
6726 struct ixgbe_hw *hw = &adapter->hw;
6727 u32 reg, rsave;
6728 int i;
6729
 6730	/* 82598 has a static priority-to-TC mapping that cannot
 6731	 * be changed, so no validation is needed.
6732 */
6733 if (hw->mac.type == ixgbe_mac_82598EB)
6734 return;
6735
6736 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
6737 rsave = reg;
6738
6739 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
6740 u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
6741
6742 /* If up2tc is out of bounds default to zero */
6743 if (up2tc > tc)
6744 reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
6745 }
6746
6747 if (reg != rsave)
6748 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
6749
6750 return;
6751}
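/*
 * Illustrative sketch (not part of the driver): extracting one
 * user-priority-to-TC field from an RTRUP2TC-style register the way the
 * loop in ixgbe_validate_rtr() walks it. The 3-bit stride mirrors
 * IXGBE_RTRUP2TC_UP_SHIFT; the explicit mask is an addition for clarity.
 */
#include <stdint.h>

static inline unsigned int ex_up2tc(uint32_t rtrup2tc, unsigned int prio)
{
	return (rtrup2tc >> (prio * 3)) & 0x7;	/* 3 bits per priority */
}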
6752
Ben Hutchings49ce9c22012-07-10 10:56:00 +00006753/**
Alexander Duyck02debdc2012-05-18 06:33:31 +00006754 * ixgbe_set_prio_tc_map - Configure netdev prio tc map
6755 * @adapter: Pointer to adapter struct
6756 *
6757 * Populate the netdev user priority to tc map
6758 */
6759static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
6760{
6761 struct net_device *dev = adapter->netdev;
6762 struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
6763 struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
6764 u8 prio;
6765
6766 for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
6767 u8 tc = 0;
6768
6769 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
6770 tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
6771 else if (ets)
6772 tc = ets->prio_tc[prio];
6773
6774 netdev_set_prio_tc_map(dev, prio, tc);
6775 }
6776}
6777
6778/**
Ben Hutchings49ce9c22012-07-10 10:56:00 +00006779 * ixgbe_setup_tc - configure net_device for multiple traffic classes
John Fastabend8b1c0b22011-05-03 02:26:48 +00006780 *
6781 * @netdev: net device to configure
6782 * @tc: number of traffic classes to enable
6783 */
6784int ixgbe_setup_tc(struct net_device *dev, u8 tc)
6785{
John Fastabend8b1c0b22011-05-03 02:26:48 +00006786 struct ixgbe_adapter *adapter = netdev_priv(dev);
6787 struct ixgbe_hw *hw = &adapter->hw;
John Fastabend8b1c0b22011-05-03 02:26:48 +00006788
John Fastabend8b1c0b22011-05-03 02:26:48 +00006789 /* Hardware supports up to 8 traffic classes */
John Fastabend4de2a022011-09-27 03:52:01 +00006790 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
Alexander Duyck581330b2012-02-08 07:51:47 +00006791 (hw->mac.type == ixgbe_mac_82598EB &&
6792 tc < MAX_TRAFFIC_CLASS))
John Fastabend8b1c0b22011-05-03 02:26:48 +00006793 return -EINVAL;
6794
6795 /* Hardware has to reinitialize queues and interrupts to
Stephen Hemminger52f33af2011-12-22 16:34:52 +00006796 * match packet buffer alignment. Unfortunately, the
John Fastabend8b1c0b22011-05-03 02:26:48 +00006797 * hardware is not flexible enough to do this dynamically.
6798 */
6799 if (netif_running(dev))
6800 ixgbe_close(dev);
6801 ixgbe_clear_interrupt_scheme(adapter);
6802
John Fastabende7589ea2011-07-18 22:38:36 +00006803 if (tc) {
John Fastabend8b1c0b22011-05-03 02:26:48 +00006804 netdev_set_num_tc(dev, tc);
Alexander Duyck02debdc2012-05-18 06:33:31 +00006805 ixgbe_set_prio_tc_map(adapter);
6806
John Fastabende7589ea2011-07-18 22:38:36 +00006807 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
John Fastabende7589ea2011-07-18 22:38:36 +00006808
Alexander Duyck943561d2012-05-09 22:14:44 -07006809 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
6810 adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
John Fastabende7589ea2011-07-18 22:38:36 +00006811 adapter->hw.fc.requested_mode = ixgbe_fc_none;
Alexander Duyck943561d2012-05-09 22:14:44 -07006812 }
John Fastabende7589ea2011-07-18 22:38:36 +00006813 } else {
John Fastabend8b1c0b22011-05-03 02:26:48 +00006814 netdev_reset_tc(dev);
Alexander Duyck02debdc2012-05-18 06:33:31 +00006815
Alexander Duyck943561d2012-05-09 22:14:44 -07006816 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
6817 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
John Fastabende7589ea2011-07-18 22:38:36 +00006818
6819 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
John Fastabende7589ea2011-07-18 22:38:36 +00006820
6821 adapter->temp_dcb_cfg.pfc_mode_enable = false;
6822 adapter->dcb_cfg.pfc_mode_enable = false;
6823 }
6824
John Fastabend8b1c0b22011-05-03 02:26:48 +00006825 ixgbe_init_interrupt_scheme(adapter);
6826 ixgbe_validate_rtr(adapter, tc);
6827 if (netif_running(dev))
6828 ixgbe_open(dev);
6829
6830 return 0;
6831}
Eric Dumazetde1036b2010-10-20 23:00:04 +00006832
Jeff Kirsher8af3c332012-02-18 07:08:14 +00006833#endif /* CONFIG_IXGBE_DCB */
Don Skidmore082757a2011-07-21 05:55:00 +00006834void ixgbe_do_reset(struct net_device *netdev)
6835{
6836 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6837
6838 if (netif_running(netdev))
6839 ixgbe_reinit_locked(adapter);
6840 else
6841 ixgbe_reset(adapter);
6842}
6843
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006844static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
Alexander Duyck567d2de2012-02-11 07:18:57 +00006845 netdev_features_t features)
Don Skidmore082757a2011-07-21 05:55:00 +00006846{
6847 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6848
Don Skidmore082757a2011-07-21 05:55:00 +00006849 /* If Rx checksum is disabled, then RSC/LRO should also be disabled */
Alexander Duyck567d2de2012-02-11 07:18:57 +00006850 if (!(features & NETIF_F_RXCSUM))
6851 features &= ~NETIF_F_LRO;
Don Skidmore082757a2011-07-21 05:55:00 +00006852
Alexander Duyck567d2de2012-02-11 07:18:57 +00006853 /* Turn off LRO if not RSC capable */
6854 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
6855 features &= ~NETIF_F_LRO;
Jacob Keller8e2813f2012-04-21 06:05:40 +00006856
Alexander Duyck567d2de2012-02-11 07:18:57 +00006857 return features;
Don Skidmore082757a2011-07-21 05:55:00 +00006858}
6859
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006860static int ixgbe_set_features(struct net_device *netdev,
Alexander Duyck567d2de2012-02-11 07:18:57 +00006861 netdev_features_t features)
Don Skidmore082757a2011-07-21 05:55:00 +00006862{
6863 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Alexander Duyck567d2de2012-02-11 07:18:57 +00006864 netdev_features_t changed = netdev->features ^ features;
Don Skidmore082757a2011-07-21 05:55:00 +00006865 bool need_reset = false;
6866
Don Skidmore082757a2011-07-21 05:55:00 +00006867 /* Make sure RSC matches LRO, reset if change */
Alexander Duyck567d2de2012-02-11 07:18:57 +00006868 if (!(features & NETIF_F_LRO)) {
6869 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
Don Skidmore082757a2011-07-21 05:55:00 +00006870 need_reset = true;
Alexander Duyck567d2de2012-02-11 07:18:57 +00006871 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
6872 } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
6873 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
6874 if (adapter->rx_itr_setting == 1 ||
6875 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
6876 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
6877 need_reset = true;
6878 } else if ((changed ^ features) & NETIF_F_LRO) {
6879 e_info(probe, "rx-usecs set too low, "
6880 "disabling RSC\n");
Don Skidmore082757a2011-07-21 05:55:00 +00006881 }
6882 }
6883
6884 /*
6885 * Check if Flow Director n-tuple support was enabled or disabled. If
6886 * the state changed, we need to reset.
6887 */
Alexander Duyck39cb6812012-06-06 05:38:20 +00006888 switch (features & NETIF_F_NTUPLE) {
6889 case NETIF_F_NTUPLE:
Alexander Duyck567d2de2012-02-11 07:18:57 +00006890 /* turn off ATR, enable perfect filters and reset */
Alexander Duyck39cb6812012-06-06 05:38:20 +00006891 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
6892 need_reset = true;
6893
Alexander Duyck567d2de2012-02-11 07:18:57 +00006894 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
6895 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
Alexander Duyck39cb6812012-06-06 05:38:20 +00006896 break;
6897 default:
6898 /* turn off perfect filters, enable ATR and reset */
6899 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
6900 need_reset = true;
6901
6902 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
6903
6904 /* We cannot enable ATR if SR-IOV is enabled */
6905 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6906 break;
6907
6908 /* We cannot enable ATR if we have 2 or more traffic classes */
6909 if (netdev_get_num_tc(netdev) > 1)
6910 break;
6911
6912 /* We cannot enable ATR if RSS is disabled */
6913 if (adapter->ring_feature[RING_F_RSS].limit <= 1)
6914 break;
6915
6916 /* A sample rate of 0 indicates ATR disabled */
6917 if (!adapter->atr_sample_rate)
6918 break;
6919
6920 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
6921 break;
Don Skidmore082757a2011-07-21 05:55:00 +00006922 }
6923
John Fastabend146d4cc2012-05-15 05:59:26 +00006924 if (features & NETIF_F_HW_VLAN_RX)
6925 ixgbe_vlan_strip_enable(adapter);
6926 else
6927 ixgbe_vlan_strip_disable(adapter);
6928
Ben Greear3f2d1c02012-03-08 08:28:41 +00006929 if (changed & NETIF_F_RXALL)
6930 need_reset = true;
6931
Alexander Duyck567d2de2012-02-11 07:18:57 +00006932 netdev->features = features;
Don Skidmore082757a2011-07-21 05:55:00 +00006933 if (need_reset)
6934 ixgbe_do_reset(netdev);
6935
6936 return 0;
Don Skidmore082757a2011-07-21 05:55:00 +00006937}
6938
stephen hemmingeredc7d572012-10-01 12:32:33 +00006939static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
John Fastabend0f4b0ad2012-04-15 06:44:19 +00006940 struct net_device *dev,
stephen hemminger6b6e2722012-09-17 10:03:26 +00006941 const unsigned char *addr,
John Fastabend0f4b0ad2012-04-15 06:44:19 +00006942 u16 flags)
6943{
6944 struct ixgbe_adapter *adapter = netdev_priv(dev);
John Fastabend95447462012-05-31 12:42:26 +00006945 int err;
6946
6947 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
6948 return -EOPNOTSUPP;
John Fastabend0f4b0ad2012-04-15 06:44:19 +00006949
6950 if (ndm->ndm_state & NUD_PERMANENT) {
6951 pr_info("%s: FDB only supports static addresses\n",
6952 ixgbe_driver_name);
6953 return -EINVAL;
6954 }
6955
Ben Hutchings46acc462012-11-01 09:11:11 +00006956 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
John Fastabend95447462012-05-31 12:42:26 +00006957 u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS;
6958
6959 if (netdev_uc_count(dev) < rar_uc_entries)
John Fastabend0f4b0ad2012-04-15 06:44:19 +00006960 err = dev_uc_add_excl(dev, addr);
John Fastabend0f4b0ad2012-04-15 06:44:19 +00006961 else
John Fastabend95447462012-05-31 12:42:26 +00006962 err = -ENOMEM;
6963 } else if (is_multicast_ether_addr(addr)) {
6964 err = dev_mc_add_excl(dev, addr);
6965 } else {
6966 err = -EINVAL;
John Fastabend0f4b0ad2012-04-15 06:44:19 +00006967 }
6968
6969 /* Only return duplicate errors if NLM_F_EXCL is set */
6970 if (err == -EEXIST && !(flags & NLM_F_EXCL))
6971 err = 0;
6972
6973 return err;
6974}
6975
6976static int ixgbe_ndo_fdb_del(struct ndmsg *ndm,
6977 struct net_device *dev,
stephen hemminger6b6e2722012-09-17 10:03:26 +00006978 const unsigned char *addr)
John Fastabend0f4b0ad2012-04-15 06:44:19 +00006979{
6980 struct ixgbe_adapter *adapter = netdev_priv(dev);
6981 int err = -EOPNOTSUPP;
6982
6983 if (ndm->ndm_state & NUD_PERMANENT) {
6984 pr_info("%s: FDB only supports static addresses\n",
6985 ixgbe_driver_name);
6986 return -EINVAL;
6987 }
6988
6989 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
6990 if (is_unicast_ether_addr(addr))
6991 err = dev_uc_del(dev, addr);
6992 else if (is_multicast_ether_addr(addr))
6993 err = dev_mc_del(dev, addr);
6994 else
6995 err = -EINVAL;
6996 }
6997
6998 return err;
6999}
7000
7001static int ixgbe_ndo_fdb_dump(struct sk_buff *skb,
7002 struct netlink_callback *cb,
7003 struct net_device *dev,
7004 int idx)
7005{
7006 struct ixgbe_adapter *adapter = netdev_priv(dev);
7007
7008 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
7009 idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
7010
7011 return idx;
7012}
7013
John Fastabend815cccb2012-10-24 08:13:09 +00007014static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
7015 struct nlmsghdr *nlh)
7016{
7017 struct ixgbe_adapter *adapter = netdev_priv(dev);
7018 struct nlattr *attr, *br_spec;
7019 int rem;
7020
7021 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
7022 return -EOPNOTSUPP;
7023
7024 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7025
7026 nla_for_each_nested(attr, br_spec, rem) {
7027 __u16 mode;
7028 u32 reg = 0;
7029
7030 if (nla_type(attr) != IFLA_BRIDGE_MODE)
7031 continue;
7032
7033 mode = nla_get_u16(attr);
7034 if (mode == BRIDGE_MODE_VEPA)
7035 reg = 0;
7036 else if (mode == BRIDGE_MODE_VEB)
7037 reg = IXGBE_PFDTXGSWC_VT_LBEN;
7038 else
7039 return -EINVAL;
7040
7041 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, reg);
7042
7043 e_info(drv, "enabling bridge mode: %s\n",
7044 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
7045 }
7046
7047 return 0;
7048}
7049
7050static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7051 struct net_device *dev)
7052{
7053 struct ixgbe_adapter *adapter = netdev_priv(dev);
7054 u16 mode;
7055
7056 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
7057 return 0;
7058
7059 if (IXGBE_READ_REG(&adapter->hw, IXGBE_PFDTXGSWC) & 1)
7060 mode = BRIDGE_MODE_VEB;
7061 else
7062 mode = BRIDGE_MODE_VEPA;
7063
7064 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
7065}
7066
Stephen Hemminger0edc3522008-11-19 22:24:29 -08007067static const struct net_device_ops ixgbe_netdev_ops = {
Joe Perchese8e9f692010-09-07 21:34:53 +00007068 .ndo_open = ixgbe_open,
Stephen Hemminger0edc3522008-11-19 22:24:29 -08007069 .ndo_stop = ixgbe_close,
Stephen Hemminger00829822008-11-20 20:14:53 -08007070 .ndo_start_xmit = ixgbe_xmit_frame,
Stephen Hemminger09a3b1f2009-03-21 13:40:01 -07007071 .ndo_select_queue = ixgbe_select_queue,
Alexander Duyck581330b2012-02-08 07:51:47 +00007072 .ndo_set_rx_mode = ixgbe_set_rx_mode,
Stephen Hemminger0edc3522008-11-19 22:24:29 -08007073 .ndo_validate_addr = eth_validate_addr,
7074 .ndo_set_mac_address = ixgbe_set_mac,
7075 .ndo_change_mtu = ixgbe_change_mtu,
7076 .ndo_tx_timeout = ixgbe_tx_timeout,
Stephen Hemminger0edc3522008-11-19 22:24:29 -08007077 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
7078 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
Ben Hutchings6b73e102009-04-29 08:08:58 +00007079 .ndo_do_ioctl = ixgbe_ioctl,
Greg Rose7f016482010-05-04 22:12:06 +00007080 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
7081 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
7082 .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
Alexander Duyck581330b2012-02-08 07:51:47 +00007083 .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
Greg Rose7f016482010-05-04 22:12:06 +00007084 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
Eric Dumazetde1036b2010-10-20 23:00:04 +00007085 .ndo_get_stats64 = ixgbe_get_stats64,
Jeff Kirsher8af3c332012-02-18 07:08:14 +00007086#ifdef CONFIG_IXGBE_DCB
John Fastabend24095aa2011-02-23 05:58:03 +00007087 .ndo_setup_tc = ixgbe_setup_tc,
Jeff Kirsher8af3c332012-02-18 07:08:14 +00007088#endif
Stephen Hemminger0edc3522008-11-19 22:24:29 -08007089#ifdef CONFIG_NET_POLL_CONTROLLER
7090 .ndo_poll_controller = ixgbe_netpoll,
7091#endif
Yi Zou332d4a72009-05-13 13:11:53 +00007092#ifdef IXGBE_FCOE
7093 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
Yi Zou68a683c2011-02-01 07:22:16 +00007094 .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
Yi Zou332d4a72009-05-13 13:11:53 +00007095 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
Yi Zou8450ff82009-08-31 12:32:14 +00007096 .ndo_fcoe_enable = ixgbe_fcoe_enable,
7097 .ndo_fcoe_disable = ixgbe_fcoe_disable,
Yi Zou61a1fa12009-10-28 18:24:56 +00007098 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
Neerav Parikhea818752012-01-04 20:23:40 +00007099 .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
Yi Zou332d4a72009-05-13 13:11:53 +00007100#endif /* IXGBE_FCOE */
Don Skidmore082757a2011-07-21 05:55:00 +00007101 .ndo_set_features = ixgbe_set_features,
7102 .ndo_fix_features = ixgbe_fix_features,
John Fastabend0f4b0ad2012-04-15 06:44:19 +00007103 .ndo_fdb_add = ixgbe_ndo_fdb_add,
7104 .ndo_fdb_del = ixgbe_ndo_fdb_del,
7105 .ndo_fdb_dump = ixgbe_ndo_fdb_dump,
John Fastabend815cccb2012-10-24 08:13:09 +00007106 .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
7107 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
Stephen Hemminger0edc3522008-11-19 22:24:29 -08007108};
7109
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07007110/**
Jacob Keller8e2813f2012-04-21 06:05:40 +00007111 * ixgbe_wol_supported - Check whether device supports WoL
 7112 * @adapter: the adapter private structure
 7113 * @device_id: the device ID
 7114 * @subdevice_id: the subsystem device ID
7115 *
7116 * This function is used by probe and ethtool to determine
7117 * which devices have WoL support
7118 *
7119 **/
7120int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
7121 u16 subdevice_id)
7122{
7123 struct ixgbe_hw *hw = &adapter->hw;
7124 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
7125 int is_wol_supported = 0;
7126
7127 switch (device_id) {
7128 case IXGBE_DEV_ID_82599_SFP:
 7129		/* Only these subdevices support WoL */
7130 switch (subdevice_id) {
7131 case IXGBE_SUBDEV_ID_82599_560FLR:
7132 /* only support first port */
7133 if (hw->bus.func != 0)
7134 break;
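			/* fall through: port 0 of the 560FLR is treated like the plain SFP subdevice */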
7135 case IXGBE_SUBDEV_ID_82599_SFP:
Don Skidmoreb6dfd932012-07-11 07:17:42 +00007136 case IXGBE_SUBDEV_ID_82599_RNDC:
Emil Tantilovf8a06c22012-08-16 08:13:07 +00007137 case IXGBE_SUBDEV_ID_82599_ECNA_DP:
Jacob Keller8e2813f2012-04-21 06:05:40 +00007138 is_wol_supported = 1;
7139 break;
7140 }
7141 break;
7142 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
7143 /* All except this subdevice support WOL */
7144 if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
7145 is_wol_supported = 1;
7146 break;
7147 case IXGBE_DEV_ID_82599_KX4:
7148 is_wol_supported = 1;
7149 break;
7150 case IXGBE_DEV_ID_X540T:
joshua.a.hay@intel.comdf376f02012-09-21 00:08:21 +00007151 case IXGBE_DEV_ID_X540T1:
Jacob Keller8e2813f2012-04-21 06:05:40 +00007152		/* check EEPROM to see if WoL is enabled */
7153 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
7154 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
7155 (hw->bus.func == 0))) {
7156 is_wol_supported = 1;
7157 }
7158 break;
7159 }
7160
7161 return is_wol_supported;
7162}
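/*
 * Illustrative sketch (not part of the driver): how a caller such as probe
 * might consume ixgbe_wol_supported(); gate the wake-up filter on the
 * result and tell the PM core. The helper name and the magic-packet-only
 * policy are assumptions for the example.
 */
static void ex_config_wol(struct ixgbe_adapter *adapter, struct pci_dev *pdev)
{
	adapter->wol = 0;
	if (ixgbe_wol_supported(adapter, pdev->device,
				pdev->subsystem_device))
		adapter->wol = IXGBE_WUFC_MAG;	/* wake on magic packet */

	device_set_wakeup_enable(&pdev->dev, adapter->wol);
}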
7163
7164/**
Auke Kok9a799d72007-09-15 14:07:45 -07007165 * ixgbe_probe - Device Initialization Routine
7166 * @pdev: PCI device information struct
7167 * @ent: entry in ixgbe_pci_tbl
7168 *
7169 * Returns 0 on success, negative on failure
7170 *
7171 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
7172 * The OS initialization, configuring of the adapter private structure,
7173 * and a hardware reset occur.
7174 **/
7175static int __devinit ixgbe_probe(struct pci_dev *pdev,
Joe Perchese8e9f692010-09-07 21:34:53 +00007176 const struct pci_device_id *ent)
Auke Kok9a799d72007-09-15 14:07:45 -07007177{
7178 struct net_device *netdev;
7179 struct ixgbe_adapter *adapter = NULL;
7180 struct ixgbe_hw *hw;
7181 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
Auke Kok9a799d72007-09-15 14:07:45 -07007182 static int cards_found;
7183 int i, err, pci_using_dac;
Don Skidmore289700db2010-12-03 03:32:58 +00007184 u8 part_str[IXGBE_PBANUM_LENGTH];
John Fastabendc85a2612010-02-25 23:15:21 +00007185 unsigned int indices = num_possible_cpus();
John Fastabend3f4a6f02012-06-05 05:58:52 +00007186 unsigned int dcb_max = 0;
Yi Zoueacd73f2009-05-13 13:11:06 +00007187#ifdef IXGBE_FCOE
7188 u16 device_caps;
7189#endif
Don Skidmore289700db2010-12-03 03:32:58 +00007190 u32 eec;
Auke Kok9a799d72007-09-15 14:07:45 -07007191
Andy Gospodarekbded64a2010-07-21 06:40:31 +00007192 /* Catch broken hardware that put the wrong VF device ID in
7193 * the PCIe SR-IOV capability.
7194 */
7195 if (pdev->is_virtfn) {
7196 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
7197 pci_name(pdev), pdev->vendor, pdev->device);
7198 return -EINVAL;
7199 }
7200
gouji-new9ce77662009-05-06 10:44:45 +00007201 err = pci_enable_device_mem(pdev);
Auke Kok9a799d72007-09-15 14:07:45 -07007202 if (err)
7203 return err;
7204
Nick Nunley1b507732010-04-27 13:10:27 +00007205 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
7206 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
Auke Kok9a799d72007-09-15 14:07:45 -07007207 pci_using_dac = 1;
7208 } else {
Nick Nunley1b507732010-04-27 13:10:27 +00007209 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9a799d72007-09-15 14:07:45 -07007210 if (err) {
Nick Nunley1b507732010-04-27 13:10:27 +00007211 err = dma_set_coherent_mask(&pdev->dev,
7212 DMA_BIT_MASK(32));
Auke Kok9a799d72007-09-15 14:07:45 -07007213 if (err) {
Dan Carpenterb8bc0422010-07-27 00:05:56 +00007214 dev_err(&pdev->dev,
7215 "No usable DMA configuration, aborting\n");
Auke Kok9a799d72007-09-15 14:07:45 -07007216 goto err_dma;
7217 }
7218 }
7219 pci_using_dac = 0;
7220 }
7221
gouji-new9ce77662009-05-06 10:44:45 +00007222 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
Joe Perchese8e9f692010-09-07 21:34:53 +00007223 IORESOURCE_MEM), ixgbe_driver_name);
Auke Kok9a799d72007-09-15 14:07:45 -07007224 if (err) {
Dan Carpenterb8bc0422010-07-27 00:05:56 +00007225 dev_err(&pdev->dev,
7226 "pci_request_selected_regions failed 0x%x\n", err);
Auke Kok9a799d72007-09-15 14:07:45 -07007227 goto err_pci_reg;
7228 }
7229
Frans Pop19d5afd2009-10-02 10:04:12 -07007230 pci_enable_pcie_error_reporting(pdev);
Peter P Waskiewicz Jr6fabd712008-12-10 01:13:08 -08007231
Auke Kok9a799d72007-09-15 14:07:45 -07007232 pci_set_master(pdev);
Wendy Xiongfb3b27b2008-04-23 11:09:24 -07007233 pci_save_state(pdev);
Auke Kok9a799d72007-09-15 14:07:45 -07007234
John Fastabende901acd2011-04-26 07:26:08 +00007235#ifdef CONFIG_IXGBE_DCB
John Fastabend3f4a6f02012-06-05 05:58:52 +00007236 if (ii->mac == ixgbe_mac_82598EB)
7237 dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
7238 IXGBE_MAX_RSS_INDICES);
7239 else
7240 dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
7241 IXGBE_MAX_FDIR_INDICES);
John Fastabende901acd2011-04-26 07:26:08 +00007242#endif
7243
John Fastabendc85a2612010-02-25 23:15:21 +00007244 if (ii->mac == ixgbe_mac_82598EB)
7245 indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
7246 else
7247 indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
7248
John Fastabende901acd2011-04-26 07:26:08 +00007249#ifdef IXGBE_FCOE
John Fastabendc85a2612010-02-25 23:15:21 +00007250 indices += min_t(unsigned int, num_possible_cpus(),
7251 IXGBE_MAX_FCOE_INDICES);
7252#endif
John Fastabend3f4a6f02012-06-05 05:58:52 +00007253 indices = max_t(unsigned int, dcb_max, indices);
John Fastabendc85a2612010-02-25 23:15:21 +00007254 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
Auke Kok9a799d72007-09-15 14:07:45 -07007255 if (!netdev) {
7256 err = -ENOMEM;
7257 goto err_alloc_etherdev;
7258 }
7259
Auke Kok9a799d72007-09-15 14:07:45 -07007260 SET_NETDEV_DEV(netdev, &pdev->dev);
7261
Auke Kok9a799d72007-09-15 14:07:45 -07007262 adapter = netdev_priv(netdev);
Alexander Duyckc60fbb02010-11-16 19:26:54 -08007263 pci_set_drvdata(pdev, adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07007264
7265 adapter->netdev = netdev;
7266 adapter->pdev = pdev;
7267 hw = &adapter->hw;
7268 hw->back = adapter;
stephen hemmingerb3f4d592012-03-13 06:04:20 +00007269 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
Auke Kok9a799d72007-09-15 14:07:45 -07007270
Jeff Kirsher05857982008-09-11 19:57:00 -07007271 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
Joe Perchese8e9f692010-09-07 21:34:53 +00007272 pci_resource_len(pdev, 0));
Auke Kok9a799d72007-09-15 14:07:45 -07007273 if (!hw->hw_addr) {
7274 err = -EIO;
7275 goto err_ioremap;
7276 }
7277
Stephen Hemminger0edc3522008-11-19 22:24:29 -08007278 netdev->netdev_ops = &ixgbe_netdev_ops;
Auke Kok9a799d72007-09-15 14:07:45 -07007279 ixgbe_set_ethtool_ops(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07007280 netdev->watchdog_timeo = 5 * HZ;
Don Skidmore9fe93af2010-12-03 09:33:54 +00007281 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
Auke Kok9a799d72007-09-15 14:07:45 -07007282
Auke Kok9a799d72007-09-15 14:07:45 -07007283 adapter->bd_number = cards_found;
7284
Auke Kok9a799d72007-09-15 14:07:45 -07007285 /* Setup hw api */
7286 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08007287 hw->mac.type = ii->mac;
Auke Kok9a799d72007-09-15 14:07:45 -07007288
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07007289 /* EEPROM */
7290 memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
7291 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
 7292	/* If EEPROM is valid (bit 8 = 1), use the default; otherwise use bit-bang */
7293 if (!(eec & (1 << 8)))
7294 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
7295
7296 /* PHY */
7297 memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
Donald Skidmorec4900be2008-11-20 21:11:42 -08007298 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
Ben Hutchings6b73e102009-04-29 08:08:58 +00007299 /* ixgbe_identify_phy_generic will set prtad and mmds properly */
7300 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
7301 hw->phy.mdio.mmds = 0;
7302 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
7303 hw->phy.mdio.dev = netdev;
7304 hw->phy.mdio.mdio_read = ixgbe_mdio_read;
7305 hw->phy.mdio.mdio_write = ixgbe_mdio_write;
Donald Skidmorec4900be2008-11-20 21:11:42 -08007306
Don Skidmore8ca783a2009-05-26 20:40:47 -07007307 ii->get_invariants(hw);
Auke Kok9a799d72007-09-15 14:07:45 -07007308
7309 /* setup the private structure */
7310 err = ixgbe_sw_init(adapter);
7311 if (err)
7312 goto err_sw_init;
7313
Don Skidmoree86bff02010-02-11 04:14:08 +00007314	/* Make it possible for the adapter to be woken up via WoL */
Don Skidmoreb93a2222010-11-16 19:27:17 -08007315 switch (adapter->hw.mac.type) {
7316 case ixgbe_mac_82599EB:
7317 case ixgbe_mac_X540:
Don Skidmoree86bff02010-02-11 04:14:08 +00007318 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
Don Skidmoreb93a2222010-11-16 19:27:17 -08007319 break;
7320 default:
7321 break;
7322 }
Don Skidmoree86bff02010-02-11 04:14:08 +00007323
Don Skidmorebf069c92009-05-07 10:39:54 +00007324 /*
7325 * If there is a fan on this device and it has failed log the
7326 * failure.
7327 */
7328 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
7329 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
7330 if (esdp & IXGBE_ESDP_SDP1)
Emil Tantilov396e7992010-07-01 20:05:12 +00007331 e_crit(probe, "Fan has stopped, replace the adapter\n");
Don Skidmorebf069c92009-05-07 10:39:54 +00007332 }
7333
	if (allow_unsupported_sfp)
		hw->allow_unsupported_sfp = allow_unsupported_sfp;

	/* reset_hw fills in the perm_addr as well */
	hw->phy.reset_if_overtemp = true;
	err = hw->mac.ops.reset_hw(hw);
	hw->phy.reset_if_overtemp = false;
	if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
	    hw->mac.type == ixgbe_mac_82598EB) {
		err = 0;
	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		e_dev_err("failed to load because an unsupported SFP+ "
			  "module type was detected.\n");
		e_dev_err("Reload the driver after installing a supported "
			  "module.\n");
		goto err_sw_init;
	} else if (err) {
		e_dev_err("HW Init failed: %d\n", err);
		goto err_sw_init;
	}

#ifdef CONFIG_PCI_IOV
	ixgbe_enable_sriov(adapter, ii);

#endif
	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_IPV6_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER |
			   NETIF_F_TSO |
			   NETIF_F_TSO6 |
			   NETIF_F_RXHASH |
			   NETIF_F_RXCSUM;

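	/*
	 * hw_features is the superset user space may toggle via ethtool;
	 * netdev->features above is the set enabled by default.
	 */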
	netdev->hw_features = netdev->features;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		netdev->features |= NETIF_F_SCTP_CSUM;
		netdev->hw_features |= NETIF_F_SCTP_CSUM |
				       NETIF_F_NTUPLE;
		break;
	default:
		break;
	}

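	/* RXALL lets ethtool request delivery of errored frames for debugging */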
	netdev->hw_features |= NETIF_F_RXALL;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;

#ifdef CONFIG_IXGBE_DCB
	netdev->dcbnl_ops = &dcbnl_ops;
#endif

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
		if (hw->mac.ops.get_device_caps) {
			hw->mac.ops.get_device_caps(hw, &device_caps);
			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
		}

		adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;

		netdev->features |= NETIF_F_FSO |
				    NETIF_F_FCOE_CRC;

		netdev->vlan_features |= NETIF_F_FSO |
					 NETIF_F_FCOE_CRC |
					 NETIF_F_FCOE_MTU;
	}
#endif /* IXGBE_FCOE */
	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
		netdev->hw_features |= NETIF_F_LRO;
	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
		netdev->features |= NETIF_F_LRO;

	/* make sure the EEPROM is good */
	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
		e_dev_err("The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_sw_init;
	}

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);

	if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
		e_dev_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	setup_timer(&adapter->service_timer, &ixgbe_service_timer,
		    (unsigned long) adapter);

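	/*
	 * The service timer periodically schedules the service task, which
	 * handles watchdog, link and SFP housekeeping outside IRQ context.
	 */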
	INIT_WORK(&adapter->service_task, ixgbe_service_task);
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	/* WOL not supported for all devices */
	adapter->wol = 0;
	hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
	if (ixgbe_wol_supported(adapter, pdev->device, pdev->subsystem_device))
		adapter->wol = IXGBE_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* save off EEPROM version number */
	hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
	hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);

	/* pick up the PCI bus settings for reporting later */
	hw->mac.ops.get_bus_info(hw);

	/* print bus type/speed/width info */
	e_dev_info("(PCI Express:%s:%s) %pM\n",
		   (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
		    hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
		    "Unknown"),
		   (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
		    hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
		    hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
		    "Unknown"),
		   netdev->dev_addr);

	err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
	if (err)
		strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
			   part_str);
	else
		e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
			   hw->mac.type, hw->phy.type, part_str);

	if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
		e_dev_warn("PCI-Express bandwidth available for this card is "
			   "not sufficient for optimal performance.\n");
		e_dev_warn("For optimal performance a x8 PCI-Express slot "
			   "is required.\n");
	}

	/* reset the hardware with the new settings */
	err = hw->mac.ops.start_hw(hw);
	if (err == IXGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
		e_dev_warn("This device is a pre-production adapter/LOM. "
			   "Please be aware there may be issues associated "
			   "with your hardware. If you are experiencing "
			   "problems please contact your Intel or hardware "
			   "representative who provided you with this "
			   "hardware.\n");
	}
	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* power down the optics for 82599 SFP+ fiber */
	if (hw->mac.ops.disable_tx_laser)
		hw->mac.ops.disable_tx_laser(hw);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IXGBE_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		ixgbe_setup_dca(adapter);
	}
#endif
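	/*
	 * Bit 28 of the event mask passed to ixgbe_vf_configuration() below
	 * signals "VF enabled"; the low bits carry the VF index.
	 */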
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
		for (i = 0; i < adapter->num_vfs; i++)
			ixgbe_vf_configuration(pdev, (i | 0x10000000));
	}

	/* firmware requires the driver version to be 0xFFFFFFFF, since the
	 * OS does not support the feature
	 */
	if (hw->mac.ops.set_fw_drv_ver)
		hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF,
					   0xFF);

	/* add san mac addr to netdev */
	ixgbe_add_sanmac_netdev(netdev);

	e_dev_info("%s\n", ixgbe_default_device_descr);
	cards_found++;

#ifdef CONFIG_IXGBE_HWMON
	if (ixgbe_sysfs_init(adapter))
		e_err(probe, "failed to allocate sysfs resources\n");
#endif /* CONFIG_IXGBE_HWMON */

#ifdef CONFIG_DEBUG_FS
	ixgbe_dbg_adapter_init(adapter);
#endif /* CONFIG_DEBUG_FS */

	return 0;

err_register:
	ixgbe_release_hw_control(adapter);
	ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbe_disable_sriov(adapter);
	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_DEBUG_FS
	ixgbe_dbg_adapter_exit(adapter);
#endif /* CONFIG_DEBUG_FS */

	set_bit(__IXGBE_DOWN, &adapter->state);
	cancel_work_sync(&adapter->service_task);

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
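		/* writing 1 to DCA_CTRL disables DCA tagging in hardware */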
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
	}

#endif
#ifdef CONFIG_IXGBE_HWMON
	ixgbe_sysfs_exit(adapter);
#endif /* CONFIG_IXGBE_HWMON */

	/* remove the added san mac */
	ixgbe_del_sanmac_netdev(netdev);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbe_disable_sriov(adapter);

	ixgbe_clear_interrupt_scheme(adapter);

	ixgbe_release_hw_control(adapter);

#ifdef CONFIG_DCB
	kfree(adapter->ixgbe_ieee_pfc);
	kfree(adapter->ixgbe_ieee_ets);

#endif
	iounmap(adapter->hw.hw_addr);
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
				     IORESOURCE_MEM));

	e_dev_info("complete\n");

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_PCI_IOV
	struct pci_dev *bdev, *vfdev;
	u32 dw0, dw1, dw2, dw3;
	int vf, pos;
	u16 req_id, pf_func;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
	    adapter->num_vfs == 0)
		goto skip_bad_vf_detection;

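	/*
	 * Walk up to the PCIe root port; its AER header log records the
	 * TLP header of the transaction that triggered the error.
	 */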
	bdev = pdev->bus->self;
	while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
		bdev = bdev->bus->self;

	if (!bdev)
		goto skip_bad_vf_detection;

	pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		goto skip_bad_vf_detection;

	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0);
	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1);
	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2);
	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3);

	req_id = dw1 >> 16;
	/* On the 82599 if bit 7 of the requestor ID is set then it's a VF */
	if (!(req_id & 0x0080))
		goto skip_bad_vf_detection;

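	/*
	 * In the requester ID, bit 0 selects the PF the VF belongs to and
	 * bits 6:1 encode the VF index, as decoded below.
	 */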
	pf_func = req_id & 0x01;
	if ((pf_func & 1) == (pdev->devfn & 1)) {
		unsigned int device_id;

		vf = (req_id & 0x7F) >> 1;
		e_dev_err("VF %d has caused a PCIe error\n", vf);
		e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
			  "%8.8x\tdw3: %8.8x\n",
			  dw0, dw1, dw2, dw3);
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
			device_id = IXGBE_82599_VF_DEVICE_ID;
			break;
		case ixgbe_mac_X540:
			device_id = IXGBE_X540_VF_DEVICE_ID;
			break;
		default:
			device_id = 0;
			break;
		}

		/* Find the pci device of the offending VF */
		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
		while (vfdev) {
			if (vfdev->devfn == (req_id & 0xFF))
				break;
			vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
					       device_id, vfdev);
		}
		/*
		 * There's a slim chance the VF could have been hot plugged,
		 * so if it is no longer present we don't need to issue the
		 * VFLR.  Just clean up the AER in that case.
		 */
		if (vfdev) {
			e_dev_err("Issuing VFLR to VF %d\n", vf);
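			/*
			 * Set the Initiate FLR bit (0x8000) in what is, on
			 * these VFs, the PCIe Device Control register at
			 * config offset 0xA8.
			 */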
			pci_write_config_dword(vfdev, 0xA8, 0x00008000);
		}

		pci_cleanup_aer_uncorrect_error_status(pdev);
	}

	/*
	 * Even though the error may have occurred on the other port
	 * we still need to increment the vf error reference count for
	 * both ports because the I/O resume function will be called
	 * for both of them.
	 */
	adapter->vferr_refcount++;

	return PCI_ERS_RESULT_RECOVERED;

skip_bad_vf_detection:
#endif /* CONFIG_PCI_IOV */
	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		e_err(probe, "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
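		/*
		 * pci_restore_state() consumes the saved state, so save it
		 * again for any later suspend or error recovery.
		 */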

		pci_wake_from_d3(pdev, false);

		ixgbe_reset(adapter);
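		/* clear any wake-up status left over from before the reset */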
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		e_dev_err("pci_cleanup_aer_uncorrect_error_status "
			  "failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_PCI_IOV
	if (adapter->vferr_refcount) {
		e_info(drv, "Resuming after VF err\n");
		adapter->vferr_refcount--;
		return;
	}

#endif
	if (netif_running(netdev))
		ixgbe_up(adapter);

	netif_device_attach(netdev);
}

static const struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
	.name     = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe    = ixgbe_probe,
	.remove   = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
	.suspend  = ixgbe_suspend,
	.resume   = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
};

/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
	pr_info("%s\n", ixgbe_copyright);

#ifdef CONFIG_DEBUG_FS
	ixgbe_dbg_init();
#endif /* CONFIG_DEBUG_FS */

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&ixgbe_driver);
	return ret;
}

module_init(ixgbe_init_module);

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);

#ifdef CONFIG_DEBUG_FS
	ixgbe_dbg_exit();
#endif /* CONFIG_DEBUG_FS */

	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
			    void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif /* CONFIG_IXGBE_DCA */

module_exit(ixgbe_exit_module);

/* ixgbe_main.c */