/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2010 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <scsi/fc/fc_fcoe.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
			"Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "3.0.12-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
	[board_82599] = &ixgbe_82599_info,
	[board_X540] = &ixgbe_X540_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T),
	 board_82599 },

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next = NULL,
	.priority = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
		 "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gcr;
	u32 gpie;
	u32 vmdctl;

#ifdef CONFIG_PCI_IOV
	/* disable iov and allow time for transactions to clear */
	pci_disable_sriov(adapter->pdev);
#endif

	/* turn off device IOV mode */
	gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gcr &= ~(IXGBE_GCR_EXT_SRIOV);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* set default pool back to 0 */
	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);

	/* take a breather then clean up driver data */
	msleep(100);

	kfree(adapter->vfinfo);
	adapter->vfinfo = NULL;

	adapter->num_vfs = 0;
	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
}

struct ixgbe_reg_info {
	u32 ofs;
	char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

	/* General Registers */
	{IXGBE_CTRL, "CTRL"},
	{IXGBE_STATUS, "STATUS"},
	{IXGBE_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{IXGBE_EICR, "EICR"},

	/* RX Registers */
	{IXGBE_SRRCTL(0), "SRRCTL"},
	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
	{IXGBE_RDLEN(0), "RDLEN"},
	{IXGBE_RDH(0), "RDH"},
	{IXGBE_RDT(0), "RDT"},
	{IXGBE_RXDCTL(0), "RXDCTL"},
	{IXGBE_RDBAL(0), "RDBAL"},
	{IXGBE_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{IXGBE_TDBAL(0), "TDBAL"},
	{IXGBE_TDBAH(0), "TDBAH"},
	{IXGBE_TDLEN(0), "TDLEN"},
	{IXGBE_TDH(0), "TDH"},
	{IXGBE_TDT(0), "TDT"},
	{IXGBE_TXDCTL(0), "TXDCTL"},

	/* List Terminator */
	{}
};


/*
 * ixgbe_regdump - register printout routine
 */
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
	int i = 0, j = 0;
	char rname[16];
	u32 regs[64];

	switch (reginfo->ofs) {
	case IXGBE_SRRCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
		break;
	case IXGBE_DCA_RXCTRL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		break;
	case IXGBE_RDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
		break;
	case IXGBE_RDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
		break;
	case IXGBE_RDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
		break;
	case IXGBE_RXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		break;
	case IXGBE_RDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
		break;
	case IXGBE_RDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
		break;
	case IXGBE_TDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
		break;
	case IXGBE_TDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
		break;
	case IXGBE_TDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
		break;
	case IXGBE_TDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
		break;
	case IXGBE_TDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
		break;
	case IXGBE_TXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name,
			IXGBE_READ_REG(hw, reginfo->ofs));
		return;
	}

	for (i = 0; i < 8; i++) {
		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
		pr_err("%-15s", rname);
		for (j = 0; j < 8; j++)
			pr_cont(" %08x", regs[i*8+j]);
		pr_cont("\n");
	}

}

/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_reg_info *reginfo;
	int n = 0;
	struct ixgbe_ring *tx_ring;
	struct ixgbe_tx_buffer *tx_buffer_info;
	union ixgbe_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct ixgbe_ring *rx_ring;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer_info;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            "
			"trans_start      last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
	     reginfo->name; reginfo++) {
		ixgbe_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		tx_buffer_info =
			&tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)tx_buffer_info->dma,
			tx_buffer_info->length,
			tx_buffer_info->next_to_watch,
			(u64)tx_buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 |  PAYLEN  | PORTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN  |
	 *   +--------------------------------------------------------------+
	 *   63       46 45    40 39 36 35 32 31   24 23  20 19            0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] "
			"[PlPOIdStDDt Ln] [bi->dma       ] "
			"leng ntw timestamp bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			pr_info("T [0x%03X]    %016llX %016llX %016llX"
				" %04X %3X %016llX %p", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)tx_buffer_info->dma,
				tx_buffer_info->length,
				tx_buffer_info->next_to_watch,
				(u64)tx_buffer_info->time_stamp,
				tx_buffer_info->skb);
			if (i == tx_ring->next_to_use &&
				i == tx_ring->next_to_clean)
				pr_cont(" NTC/U\n");
			else if (i == tx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == tx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");

			if (netif_msg_pktdata(adapter) &&
				tx_buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS, 16, 1,
					phys_to_virt(tx_buffer_info->dma),
					tx_buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("%5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 16 15   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN | RSV|Packet|  RSS   |
	 *   | Checksum   Ident  |   |         |    | Type | Type   |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] "
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
			"<-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] "
			"[vl er S cks ln] ---------------- [bi->skb] "
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			rx_buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & IXGBE_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("RWB[0x%03X]     %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					rx_buffer_info->skb);
			} else {
				pr_info("R  [0x%03X]     %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)rx_buffer_info->dma,
					rx_buffer_info->skb);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
					   DUMP_PREFIX_ADDRESS, 16, 1,
					   phys_to_virt(rx_buffer_info->dma),
					   rx_ring->rx_buf_len, true);

					if (rx_ring->rx_buf_len
						< IXGBE_RXBUFFER_2048)
						print_hex_dump(KERN_INFO, "",
						  DUMP_PREFIX_ADDRESS, 16, 1,
						  phys_to_virt(
						    rx_buffer_info->page_dma +
						    rx_buffer_info->page_offset
						  ),
						  PAGE_SIZE/2, true);
				}
			}

			if (i == rx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == rx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");

		}
	}

exit:
	return;
}

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

/*
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
			   u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}

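/*
 * Worked example for the IVAR math above (derived from the code, for
 * illustration): on 82598, Rx queue 5 (direction 0) yields index
 * ((0 * 64 + 5) >> 2) & 0x1F = 1 and byte lane 5 & 0x3 = 1, i.e. bits
 * 15:8 of IVAR(1).  On 82599/X540, Tx queue 5 (direction 1) yields
 * index 16 * (5 & 1) + 8 * 1 = 24, i.e. bits 31:24 of IVAR(5 >> 1).
 */
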
static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
					  u64 qmask)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
}

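/*
 * Example (illustrative): on 82599/X540 the 64-bit qmask above is split
 * across the two extended cause-set registers, so qmask = 1ULL << 40
 * sets bit 8 of EICS_EX(1), while bits 0-31 of qmask land in EICS_EX(0).
 */
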
void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
				      struct ixgbe_tx_buffer *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(tx_ring->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

/**
 * ixgbe_dcb_txq_to_tc - convert a reg index to a traffic class
 * @adapter: driver private struct
 * @reg_idx: reg idx of queue to query (0-127)
 *
 * Helper function to determine the traffic class for a particular
 * register index.
 *
 * Returns: a TC index in the range 0-7 (or 0-3 when four TCs are in use)
 */
u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
{
	int tc = -1;
	int dcb_i = adapter->ring_feature[RING_F_DCB].indices;

	/* if DCB is not enabled the queues have no TC */
	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
		return tc;

	/* check valid range */
	if (reg_idx >= adapter->hw.mac.max_tx_queues)
		return tc;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		tc = reg_idx >> 2;
		break;
	default:
		if (dcb_i != 4 && dcb_i != 8)
			break;

		/* if VMDq is enabled the lowest order bits determine TC */
		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
				      IXGBE_FLAG_VMDQ_ENABLED)) {
			tc = reg_idx & (dcb_i - 1);
			break;
		}

		/*
		 * Convert the reg_idx into the correct TC. This bitmask
		 * targets the last full 32 ring traffic class and assigns
		 * it a value of 1. From there the rest of the rings are
		 * based on shifting the mask further up to include the
		 * reg_idx / 16 and then reg_idx / 8. It assumes dcb_i
		 * will only ever be 8 or 4 and that reg_idx will never
		 * be greater than 128. The code without the power of 2
		 * optimizations would be:
		 *   (((reg_idx % 32) + 32) * dcb_i) >> (9 - reg_idx / 32)
		 */
		tc = ((reg_idx & 0x1F) + 0x20) * dcb_i;
		tc >>= 9 - (reg_idx >> 5);
	}

	return tc;
}

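/*
 * Worked example for the shift trick above: with dcb_i = 8 and
 * reg_idx = 100, tc = ((100 & 0x1F) + 0x20) * 8 = 288, and
 * 288 >> (9 - (100 >> 5)) = 288 >> 6 = 4, so queue 100 maps to TC 4.
 * The long-hand form (((100 % 32) + 32) * 8) >> (9 - 100 / 32) gives
 * the same answer.
 */
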
static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	u32 data = 0;
	u32 xoff[8] = {0};
	int i;

	if ((hw->fc.current_mode == ixgbe_fc_full) ||
	    (hw->fc.current_mode == ixgbe_fc_rx_pause)) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
			break;
		default:
			data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
		}
		hwstats->lxoffrxc += data;

		/* refill credits (no tx hang) if we received xoff */
		if (!data)
			return;

		for (i = 0; i < adapter->num_tx_queues; i++)
			clear_bit(__IXGBE_HANG_CHECK_ARMED,
				  &adapter->tx_ring[i]->state);
		return;
	} else if (!(adapter->dcb_cfg.pfc_mode_enable))
		return;

	/* update stats for each tc, only valid with PFC enabled */
	for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
			break;
		default:
			xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		}
		hwstats->pxoffrxc[i] += xoff[i];
	}

	/* disarm tx queues that have received xoff frames */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
		u32 tc = ixgbe_dcb_txq_to_tc(adapter, tx_ring->reg_idx);

		if (xoff[tc])
			clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}
}

static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
{
	return ring->tx_stats.completed;
}

static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
	u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

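/*
 * Example of the wraparound handling above: with a 512-entry ring,
 * head = 500 and tail = 10 means the queue has wrapped, so the number
 * of pending descriptors is 10 + 512 - 500 = 22 rather than a negative
 * difference.
 */
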
static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
	u32 tx_done = ixgbe_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
	bool ret = false;

	clear_check_for_tx_hang(tx_ring);

	/*
	 * Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang. The
	 * bit is cleared if a pause frame is received to remove
	 * false hang detection due to PFC or 802.3x frames. By
	 * requiring this to fail twice we avoid races with
	 * pfc clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
				       &tx_ring->state);
	} else {
		/* update completed stats and continue */
		tx_ring->tx_stats.tx_done_old = tx_done;
		/* reset the countdown */
		clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}

	return ret;
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
			  (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */

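/*
 * Example of the worst-case math above (assuming 4 KB pages, where
 * MAX_SKB_FRAGS is 18): each descriptor covers at most 16 KB, so
 * TXD_USE_COUNT(60000) = 3 + 1 = 4, and DESC_NEEDED works out to
 * 1 + 18 * 1 + 1 = 20 descriptors reserved per transmitted skb.
 */
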
static void ixgbe_tx_timeout(struct net_device *netdev);

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *tx_ring)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int total_bytes = 0, total_packets = 0;
	u16 i, eop, count = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->work_limit)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		for ( ; !cleaned; count++) {
			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];

			tx_desc->wb.status = 0;
			cleaned = (i == eop);

			i++;
			if (i == tx_ring->count)
				i = 0;

			if (cleaned && tx_buffer_info->skb) {
				total_bytes += tx_buffer_info->bytecount;
				total_packets += tx_buffer_info->gso_segs;
			}

			ixgbe_unmap_and_free_tx_resource(tx_ring,
							 tx_buffer_info);
		}

		tx_ring->tx_stats.completed++;
		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
	}

	tx_ring->next_to_clean = i;
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.packets += total_packets;
	tx_ring->stats.bytes += total_bytes;
	u64_stats_update_end(&tx_ring->syncp);

	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		struct ixgbe_hw *hw = &adapter->hw;
		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
		e_err(drv, "Detected Tx Unit Hang\n"
			"  Tx Queue             <%d>\n"
			"  TDH, TDT             <%x>, <%x>\n"
			"  next_to_use          <%x>\n"
			"  next_to_clean        <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp           <%lx>\n"
			"  jiffies              <%lx>\n",
			tx_ring->queue_index,
			IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
			IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
			tx_ring->next_to_use, eop,
			tx_ring->tx_buffer_info[eop].time_stamp, jiffies);

		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

		e_info(probe,
		       "tx hang %d detected on queue %d, resetting adapter\n",
		       adapter->tx_timeout_count + 1, tx_ring->queue_index);

		/* schedule immediate reset if we believe we hung */
		ixgbe_tx_timeout(adapter->netdev);

		/* the adapter is about to reset, no point in enabling stuff */
		return true;
	}

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return count < tx_ring->work_limit;
}

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxctrl;
	u8 reg_idx = rx_ring->reg_idx;

	rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
		rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
		rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
			   IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
		break;
	default:
		break;
	}
	rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
	rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
	rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
	rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
		    IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
	IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txctrl;
	u8 reg_idx = tx_ring->reg_idx;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
		txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
		txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
			   IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
		break;
	default:
		break;
	}
}

static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	int cpu = get_cpu();
	long r_idx;
	int i;

	if (q_vector->cpu == cpu)
		goto out_no_update;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int num_q_vectors;
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	/* always use CB2 mode, difference is masked in the CB driver */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	else
		num_q_vectors = 1;

	for (i = 0; i < num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		ixgbe_update_dca(adapter->q_vector[i]);
	}
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
	unsigned long event = *(unsigned long *)data;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return 0;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_IXGBE_DCA */
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
			      struct sk_buff *skb, u8 status,
			      struct ixgbe_ring *ring,
			      union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct napi_struct *napi = &q_vector->napi;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && (tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, tag);

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(napi, skb);
	else
		netif_rx(skb);
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @rx_desc: rx descriptor carrying the hardware status of the receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);

	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

		/*
		 * 82599 errata, UDP frames with a 0 checksum can be marked as
		 * checksum errors.
		 */
		if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
		    (adapter->hw.mac.type == ixgbe_mac_82599EB))
			return;

		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	struct sk_buff *skb;
	u16 i = rx_ring->next_to_use;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev)
		return;

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
		bi = &rx_ring->rx_buffer_info[i];
		skb = bi->skb;

		if (!skb) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				rx_ring->rx_stats.alloc_rx_buff_failed++;
				goto no_buffers;
			}
			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			bi->skb = skb;
		}

		if (!bi->dma) {
			bi->dma = dma_map_single(rx_ring->dev,
						 skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
				rx_ring->rx_stats.alloc_rx_buff_failed++;
				bi->dma = 0;
				goto no_buffers;
			}
		}

		if (ring_is_ps_enabled(rx_ring)) {
			if (!bi->page) {
				bi->page = netdev_alloc_page(rx_ring->netdev);
				if (!bi->page) {
					rx_ring->rx_stats.alloc_rx_page_failed++;
					goto no_buffers;
				}
			}

			if (!bi->page_dma) {
				/* use a half page if we're re-using */
				bi->page_offset ^= PAGE_SIZE / 2;
				bi->page_dma = dma_map_page(rx_ring->dev,
							    bi->page,
							    bi->page_offset,
							    PAGE_SIZE / 2,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(rx_ring->dev,
						      bi->page_dma)) {
					rx_ring->rx_stats.alloc_rx_page_failed++;
					bi->page_dma = 0;
					goto no_buffers;
				}
			}

			/* Refresh the desc even if buffer_addrs didn't change
			 * because each write-back erases this info. */
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		ixgbe_release_rx_desc(rx_ring, i);
	}
}

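/*
 * Note on the page handling above: in packet-split mode each receive
 * buffer shares a page, and bi->page_offset ^= PAGE_SIZE / 2 toggles
 * between the two halves. With 4 KB pages, for example, a freshly
 * allocated page is first mapped at offset 2048 and then re-used at
 * offset 0, so each half serves as a 2 KB packet buffer in turn.
 */
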
static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger.  In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
	u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
		   IXGBE_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > IXGBE_RX_HDR_SIZE)
		hlen = IXGBE_RX_HDR_SIZE;
	return hlen;
}

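/*
 * Illustration (assuming the usual HDRBUFLEN field layout, with the
 * parsed header length in bits 14:5 of hdr_info): hdr_info = 0x0C40
 * masks and shifts to hlen = 0x62 = 98 bytes, which then passes the
 * IXGBE_RX_HDR_SIZE clamp unchanged.
 */
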
/**
 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
 * @skb: pointer to the last skb in the rsc queue
 *
 * This function changes a queue full of hw rsc buffers into a completed
 * packet.  It uses the ->prev pointers to find the first packet and then
 * turns it into the frag list owner.
 **/
static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
{
	unsigned int frag_list_size = 0;
	unsigned int skb_cnt = 1;

	while (skb->prev) {
		struct sk_buff *prev = skb->prev;
		frag_list_size += skb->len;
		skb->prev = NULL;
		skb = prev;
		skb_cnt++;
	}

	skb_shinfo(skb)->frag_list = skb->next;
	skb->next = NULL;
	skb->len += frag_list_size;
	skb->data_len += frag_list_size;
	skb->truesize += frag_list_size;
	IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt;

	return skb;
}

static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
{
	return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
		  IXGBE_RXDADV_RSCCNT_MASK);
}

Alexander Duyckc267fc12010-11-16 19:27:00 -08001293static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
Joe Perchese8e9f692010-09-07 21:34:53 +00001294 struct ixgbe_ring *rx_ring,
1295 int *work_done, int work_to_do)
Auke Kok9a799d72007-09-15 14:07:45 -07001296{
Herbert Xu78b6f4c2009-01-18 21:49:45 -08001297 struct ixgbe_adapter *adapter = q_vector->adapter;
Auke Kok9a799d72007-09-15 14:07:45 -07001298 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
1299 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
1300 struct sk_buff *skb;
Ayyappan Veeraiyand2f4fbe2008-02-01 15:59:19 -08001301 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
Alexander Duyckc267fc12010-11-16 19:27:00 -08001302 const int current_node = numa_node_id();
Yi Zou3d8fd382009-06-08 14:38:44 +00001303#ifdef IXGBE_FCOE
1304 int ddp_bytes = 0;
1305#endif /* IXGBE_FCOE */
Alexander Duyckc267fc12010-11-16 19:27:00 -08001306 u32 staterr;
1307 u16 i;
1308 u16 cleaned_count = 0;
Alexander Duyckaa801752010-11-16 19:27:02 -08001309 bool pkt_is_rsc = false;
Auke Kok9a799d72007-09-15 14:07:45 -07001310
1311 i = rx_ring->next_to_clean;
Alexander Duyck31f05a22010-08-19 13:40:31 +00001312 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
Auke Kok9a799d72007-09-15 14:07:45 -07001313 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
Auke Kok9a799d72007-09-15 14:07:45 -07001314
1315 while (staterr & IXGBE_RXD_STAT_DD) {
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07001316 u32 upper_len = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07001317
Milton Miller3c945e52010-02-19 17:44:42 +00001318 rmb(); /* read descriptor and rx_buffer_info after status DD */
Auke Kok9a799d72007-09-15 14:07:45 -07001319
Alexander Duyckc267fc12010-11-16 19:27:00 -08001320 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1321
Auke Kok9a799d72007-09-15 14:07:45 -07001322 skb = rx_buffer_info->skb;
Auke Kok9a799d72007-09-15 14:07:45 -07001323 rx_buffer_info->skb = NULL;
Alexander Duyckc267fc12010-11-16 19:27:00 -08001324 prefetch(skb->data);
Auke Kok9a799d72007-09-15 14:07:45 -07001325
Alexander Duyckc267fc12010-11-16 19:27:00 -08001326 if (ring_is_rsc_enabled(rx_ring))
Alexander Duyckaa801752010-11-16 19:27:02 -08001327 pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
Alexander Duyckc267fc12010-11-16 19:27:00 -08001328
1329		/* if this is an skb from a previous receive, DMA will be 0 */
Alexander Duyck21fa4e62009-06-04 15:59:49 +00001330 if (rx_buffer_info->dma) {
Alexander Duyckc267fc12010-11-16 19:27:00 -08001331 u16 hlen;
Alexander Duyckaa801752010-11-16 19:27:02 -08001332 if (pkt_is_rsc &&
Alexander Duyckc267fc12010-11-16 19:27:00 -08001333 !(staterr & IXGBE_RXD_STAT_EOP) &&
1334 !skb->prev) {
Mallikarjuna R Chilakala43634e82010-02-25 23:14:37 +00001335 /*
1336 * When HWRSC is enabled, delay unmapping
1337 * of the first packet. It carries the
1338				 * header information; HW may still
1339				 * access the header after the writeback.
1340				 * Only unmap it when EOP is reached.
1341 */
Mallikarjuna R Chilakalae8171aa2010-05-13 17:33:21 +00001342 IXGBE_RSC_CB(skb)->delay_unmap = true;
Mallikarjuna R Chilakala43634e82010-02-25 23:14:37 +00001343 IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
Mallikarjuna R Chilakalae8171aa2010-05-13 17:33:21 +00001344 } else {
Alexander Duyckb6ec8952010-11-16 19:26:49 -08001345 dma_unmap_single(rx_ring->dev,
Joe Perchese8e9f692010-09-07 21:34:53 +00001346 rx_buffer_info->dma,
1347 rx_ring->rx_buf_len,
1348 DMA_FROM_DEVICE);
Mallikarjuna R Chilakalae8171aa2010-05-13 17:33:21 +00001349 }
Jesse Brandeburg4f57ca62009-06-30 11:44:56 +00001350 rx_buffer_info->dma = 0;
Alexander Duyckc267fc12010-11-16 19:27:00 -08001351
1352 if (ring_is_ps_enabled(rx_ring)) {
1353 hlen = ixgbe_get_hlen(rx_desc);
1354 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
1355 } else {
1356 hlen = le16_to_cpu(rx_desc->wb.upper.length);
1357 }
1358
1359 skb_put(skb, hlen);
1360 } else {
1361 /* assume packet split since header is unmapped */
1362 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
Auke Kok9a799d72007-09-15 14:07:45 -07001363 }
1364
1365 if (upper_len) {
Alexander Duyckb6ec8952010-11-16 19:26:49 -08001366 dma_unmap_page(rx_ring->dev,
1367 rx_buffer_info->page_dma,
1368 PAGE_SIZE / 2,
1369 DMA_FROM_DEVICE);
Auke Kok9a799d72007-09-15 14:07:45 -07001370 rx_buffer_info->page_dma = 0;
1371 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
Joe Perchese8e9f692010-09-07 21:34:53 +00001372 rx_buffer_info->page,
1373 rx_buffer_info->page_offset,
1374 upper_len);
Jesse Brandeburg762f4c52008-09-11 19:58:43 -07001375
Alexander Duyckc267fc12010-11-16 19:27:00 -08001376 if ((page_count(rx_buffer_info->page) == 1) &&
1377 (page_to_nid(rx_buffer_info->page) == current_node))
Jesse Brandeburg762f4c52008-09-11 19:58:43 -07001378 get_page(rx_buffer_info->page);
Alexander Duyckc267fc12010-11-16 19:27:00 -08001379 else
1380 rx_buffer_info->page = NULL;
Auke Kok9a799d72007-09-15 14:07:45 -07001381
1382 skb->len += upper_len;
1383 skb->data_len += upper_len;
1384 skb->truesize += upper_len;
1385 }
1386
1387 i++;
1388 if (i == rx_ring->count)
1389 i = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07001390
Alexander Duyck31f05a22010-08-19 13:40:31 +00001391 next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i);
Auke Kok9a799d72007-09-15 14:07:45 -07001392 prefetch(next_rxd);
Auke Kok9a799d72007-09-15 14:07:45 -07001393 cleaned_count++;
Alexander Duyckf8212f92009-04-27 22:42:37 +00001394
Alexander Duyckaa801752010-11-16 19:27:02 -08001395 if (pkt_is_rsc) {
Alexander Duyckf8212f92009-04-27 22:42:37 +00001396 u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
1397 IXGBE_RXDADV_NEXTP_SHIFT;
1398 next_buffer = &rx_ring->rx_buffer_info[nextp];
Alexander Duyckf8212f92009-04-27 22:42:37 +00001399 } else {
1400 next_buffer = &rx_ring->rx_buffer_info[i];
1401 }
1402
Alexander Duyckc267fc12010-11-16 19:27:00 -08001403 if (!(staterr & IXGBE_RXD_STAT_EOP)) {
Alexander Duyck7d637bc2010-11-16 19:26:56 -08001404 if (ring_is_ps_enabled(rx_ring)) {
Alexander Duyckf8212f92009-04-27 22:42:37 +00001405 rx_buffer_info->skb = next_buffer->skb;
1406 rx_buffer_info->dma = next_buffer->dma;
1407 next_buffer->skb = skb;
1408 next_buffer->dma = 0;
1409 } else {
1410 skb->next = next_buffer->skb;
1411 skb->next->prev = skb;
1412 }
Alexander Duyck5b7da512010-11-16 19:26:50 -08001413 rx_ring->rx_stats.non_eop_descs++;
Auke Kok9a799d72007-09-15 14:07:45 -07001414 goto next_desc;
1415 }
1416
Alexander Duyckaa801752010-11-16 19:27:02 -08001417 if (skb->prev) {
1418 skb = ixgbe_transform_rsc_queue(skb);
1419 /* if we got here without RSC the packet is invalid */
1420 if (!pkt_is_rsc) {
1421 __pskb_trim(skb, 0);
1422 rx_buffer_info->skb = skb;
1423 goto next_desc;
1424 }
1425 }
Alexander Duyckc267fc12010-11-16 19:27:00 -08001426
1427 if (ring_is_rsc_enabled(rx_ring)) {
1428 if (IXGBE_RSC_CB(skb)->delay_unmap) {
1429 dma_unmap_single(rx_ring->dev,
1430 IXGBE_RSC_CB(skb)->dma,
1431 rx_ring->rx_buf_len,
1432 DMA_FROM_DEVICE);
1433 IXGBE_RSC_CB(skb)->dma = 0;
1434 IXGBE_RSC_CB(skb)->delay_unmap = false;
1435 }
Alexander Duyckaa801752010-11-16 19:27:02 -08001436 }
1437 if (pkt_is_rsc) {
Alexander Duyckc267fc12010-11-16 19:27:00 -08001438 if (ring_is_ps_enabled(rx_ring))
1439 rx_ring->rx_stats.rsc_count +=
Alexander Duyckaa801752010-11-16 19:27:02 -08001440 skb_shinfo(skb)->nr_frags;
Alexander Duyckc267fc12010-11-16 19:27:00 -08001441 else
Alexander Duyckaa801752010-11-16 19:27:02 -08001442 rx_ring->rx_stats.rsc_count +=
1443 IXGBE_RSC_CB(skb)->skb_cnt;
Alexander Duyckc267fc12010-11-16 19:27:00 -08001444 rx_ring->rx_stats.rsc_flush++;
1445 }
1446
1447 /* ERR_MASK will only have valid bits if EOP set */
Auke Kok9a799d72007-09-15 14:07:45 -07001448 if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
Alexander Duyckc267fc12010-11-16 19:27:00 -08001449 /* trim packet back to size 0 and recycle it */
1450 __pskb_trim(skb, 0);
1451 rx_buffer_info->skb = skb;
Auke Kok9a799d72007-09-15 14:07:45 -07001452 goto next_desc;
1453 }
1454
Don Skidmore8bae1b22009-07-23 18:00:39 +00001455 ixgbe_rx_checksum(adapter, rx_desc, skb);
Ayyappan Veeraiyand2f4fbe2008-02-01 15:59:19 -08001456
1457 /* probably a little skewed due to removing CRC */
1458 total_rx_bytes += skb->len;
1459 total_rx_packets++;
1460
Alexander Duyckfc77dc32010-11-16 19:26:51 -08001461 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
Yi Zou332d4a72009-05-13 13:11:53 +00001462#ifdef IXGBE_FCOE
1463 /* if ddp, not passing to ULD unless for FCP_RSP or error */
Yi Zou3d8fd382009-06-08 14:38:44 +00001464 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
1465 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
1466 if (!ddp_bytes)
Yi Zou332d4a72009-05-13 13:11:53 +00001467 goto next_desc;
Yi Zou3d8fd382009-06-08 14:38:44 +00001468 }
Yi Zou332d4a72009-05-13 13:11:53 +00001469#endif /* IXGBE_FCOE */
Alexander Duyckfdaff1c2009-05-06 10:43:47 +00001470 ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
Auke Kok9a799d72007-09-15 14:07:45 -07001471
1472next_desc:
1473 rx_desc->wb.upper.status_error = 0;
1474
Alexander Duyckc267fc12010-11-16 19:27:00 -08001475 (*work_done)++;
1476 if (*work_done >= work_to_do)
1477 break;
1478
Auke Kok9a799d72007-09-15 14:07:45 -07001479 /* return some buffers to hardware, one at a time is too slow */
1480 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
Alexander Duyckfc77dc32010-11-16 19:26:51 -08001481 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
Auke Kok9a799d72007-09-15 14:07:45 -07001482 cleaned_count = 0;
1483 }
1484
1485 /* use prefetched values */
1486 rx_desc = next_rxd;
Auke Kok9a799d72007-09-15 14:07:45 -07001487 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07001488 }
1489
Auke Kok9a799d72007-09-15 14:07:45 -07001490 rx_ring->next_to_clean = i;
1491 cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
1492
1493 if (cleaned_count)
Alexander Duyckfc77dc32010-11-16 19:26:51 -08001494 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
Auke Kok9a799d72007-09-15 14:07:45 -07001495
Yi Zou3d8fd382009-06-08 14:38:44 +00001496#ifdef IXGBE_FCOE
1497 /* include DDPed FCoE data */
1498 if (ddp_bytes > 0) {
1499 unsigned int mss;
1500
Alexander Duyckfc77dc32010-11-16 19:26:51 -08001501 mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
Yi Zou3d8fd382009-06-08 14:38:44 +00001502 sizeof(struct fc_frame_header) -
1503 sizeof(struct fcoe_crc_eof);
1504 if (mss > 512)
1505 mss &= ~511;
1506 total_rx_bytes += ddp_bytes;
1507 total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
1508 }
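	/*
	 * Worked example for the mss math above (MTU hypothetical): with an
	 * FCoE MTU of 2500 and the usual 14 + 24 + 8 bytes of FCoE header,
	 * FC frame header and CRC/EOF trailer, mss starts at 2454 and the
	 * "mss &= ~511" step rounds it down to 2048, a multiple of the
	 * 512-byte FC sequence granularity, so DIV_ROUND_UP(ddp_bytes, mss)
	 * approximates how many frames the DDP engine placed directly.
	 */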
1509#endif /* IXGBE_FCOE */
1510
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001511 rx_ring->total_packets += total_rx_packets;
1512 rx_ring->total_bytes += total_rx_bytes;
Alexander Duyckc267fc12010-11-16 19:27:00 -08001513 u64_stats_update_begin(&rx_ring->syncp);
1514 rx_ring->stats.packets += total_rx_packets;
1515 rx_ring->stats.bytes += total_rx_bytes;
1516 u64_stats_update_end(&rx_ring->syncp);
Auke Kok9a799d72007-09-15 14:07:45 -07001517}
1518
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001519static int ixgbe_clean_rxonly(struct napi_struct *, int);
Auke Kok9a799d72007-09-15 14:07:45 -07001520/**
1521 * ixgbe_configure_msix - Configure MSI-X hardware
1522 * @adapter: board private structure
1523 *
1524 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
1525 * interrupts.
1526 **/
1527static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1528{
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001529 struct ixgbe_q_vector *q_vector;
Alexander Duyckbf29ee62010-11-16 19:27:07 -08001530 int i, q_vectors, v_idx, r_idx;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001531 u32 mask;
Auke Kok9a799d72007-09-15 14:07:45 -07001532
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001533 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1534
Jesse Brandeburg4df10462009-03-13 22:15:31 +00001535 /*
1536 * Populate the IVAR table and set the ITR values to the
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001537 * corresponding register.
1538 */
1539 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
Alexander Duyck7a921c92009-05-06 10:43:28 +00001540 q_vector = adapter->q_vector[v_idx];
Akinobu Mita984b3f52010-03-05 13:41:37 -08001541 /* XXX for_each_set_bit(...) */
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001542 r_idx = find_first_bit(q_vector->rxr_idx,
Joe Perchese8e9f692010-09-07 21:34:53 +00001543 adapter->num_rx_queues);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001544
1545 for (i = 0; i < q_vector->rxr_count; i++) {
Alexander Duyckbf29ee62010-11-16 19:27:07 -08001546 u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
1547 ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001548 r_idx = find_next_bit(q_vector->rxr_idx,
Joe Perchese8e9f692010-09-07 21:34:53 +00001549 adapter->num_rx_queues,
1550 r_idx + 1);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001551 }
1552 r_idx = find_first_bit(q_vector->txr_idx,
Joe Perchese8e9f692010-09-07 21:34:53 +00001553 adapter->num_tx_queues);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001554
1555 for (i = 0; i < q_vector->txr_count; i++) {
Alexander Duyckbf29ee62010-11-16 19:27:07 -08001556 u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
1557 ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001558 r_idx = find_next_bit(q_vector->txr_idx,
Joe Perchese8e9f692010-09-07 21:34:53 +00001559 adapter->num_tx_queues,
1560 r_idx + 1);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001561 }
1562
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001563 if (q_vector->txr_count && !q_vector->rxr_count)
Nelson, Shannonf7554a22009-09-18 09:46:06 +00001564 /* tx only */
1565 q_vector->eitr = adapter->tx_eitr_param;
Jesse Brandeburg509ee932009-03-13 22:13:28 +00001566 else if (q_vector->rxr_count)
Nelson, Shannonf7554a22009-09-18 09:46:06 +00001567 /* rx or mixed */
1568 q_vector->eitr = adapter->rx_eitr_param;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001569
Alexander Duyckfe49f042009-06-04 16:00:09 +00001570 ixgbe_write_eitr(q_vector);
Peter Waskiewiczb25ebfd2010-10-05 01:27:49 +00001571 /* If Flow Director is enabled, set interrupt affinity */
1572 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
1573 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
1574 /*
1575 * Allocate the affinity_hint cpumask, assign the mask
1576 * for this vector, and set our affinity_hint for
1577 * this irq.
1578 */
1579 if (!alloc_cpumask_var(&q_vector->affinity_mask,
1580 GFP_KERNEL))
1581 return;
1582 cpumask_set_cpu(v_idx, q_vector->affinity_mask);
1583 irq_set_affinity_hint(adapter->msix_entries[v_idx].vector,
1584 q_vector->affinity_mask);
1585 }
Auke Kok9a799d72007-09-15 14:07:45 -07001586 }
1587
Alexander Duyckbd508172010-11-16 19:27:03 -08001588 switch (adapter->hw.mac.type) {
1589 case ixgbe_mac_82598EB:
PJ Waskiewicze8e26352009-02-27 15:45:05 +00001590 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
Joe Perchese8e9f692010-09-07 21:34:53 +00001591 v_idx);
Alexander Duyckbd508172010-11-16 19:27:03 -08001592 break;
1593 case ixgbe_mac_82599EB:
Don Skidmoreb93a2222010-11-16 19:27:17 -08001594 case ixgbe_mac_X540:
PJ Waskiewicze8e26352009-02-27 15:45:05 +00001595 ixgbe_set_ivar(adapter, -1, 1, v_idx);
Alexander Duyckbd508172010-11-16 19:27:03 -08001596 break;
1597
1598 default:
1599 break;
1600 }
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001601 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
Auke Kok9a799d72007-09-15 14:07:45 -07001602
Jesse Brandeburg41fb9242008-09-11 19:55:58 -07001603 /* set up to autoclear timer, and the vectors */
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001604 mask = IXGBE_EIMS_ENABLE_MASK;
Greg Rose1cdd1ec2010-01-09 02:26:46 +00001605 if (adapter->num_vfs)
1606 mask &= ~(IXGBE_EIMS_OTHER |
1607 IXGBE_EIMS_MAILBOX |
1608 IXGBE_EIMS_LSC);
1609 else
1610 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001611 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
Auke Kok9a799d72007-09-15 14:07:45 -07001612}
1613
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001614enum latency_range {
1615 lowest_latency = 0,
1616 low_latency = 1,
1617 bulk_latency = 2,
1618 latency_invalid = 255
1619};
1620
1621/**
1622 * ixgbe_update_itr - update the dynamic ITR value based on statistics
1623 * @adapter: pointer to adapter
1624 * @eitr: eitr setting (ints per sec) to give last timeslice
1625 * @itr_setting: current throttle rate in ints/second
1626 * @itr_setting: current latency setting (one of enum latency_range)
1627 * @bytes: the number of bytes during this measurement interval
1628 *
1629 * Stores a new ITR value based on packets and byte
1630 * counts during the last interrupt. The advantage of per interrupt
1631 * computation is faster updates and more accurate ITR for the current
1632 * traffic pattern. Constants in this function were computed
1633 * based on theoretical maximum wire speed and thresholds were set based
1634 * on testing data as well as attempting to minimize response time
1635 * while increasing bulk throughput.
1636 * This functionality is controlled by the InterruptThrottleRate module
1637 * parameter (see ixgbe_param.c)
1638 **/
1639static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
Joe Perchese8e9f692010-09-07 21:34:53 +00001640 u32 eitr, u8 itr_setting,
1641 int packets, int bytes)
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001642{
1643 unsigned int retval = itr_setting;
1644 u32 timepassed_us;
1645 u64 bytes_perint;
1646
1647 if (packets == 0)
1648 goto update_itr_done;
1649
1650
1651	/* simple throttle rate management
1652	 * 0-20MB/s lowest (100000 ints/s)
1653	 * 20-100MB/s low (20000 ints/s)
1654	 * 100-1249MB/s bulk (8000 ints/s)
1655	 */
1656	/* what was the last interrupt timeslice? */
1657 timepassed_us = 1000000/eitr;
1658 bytes_perint = bytes / timepassed_us; /* bytes/usec */
1659
1660 switch (itr_setting) {
1661 case lowest_latency:
1662 if (bytes_perint > adapter->eitr_low)
1663 retval = low_latency;
1664 break;
1665 case low_latency:
1666 if (bytes_perint > adapter->eitr_high)
1667 retval = bulk_latency;
1668 else if (bytes_perint <= adapter->eitr_low)
1669 retval = lowest_latency;
1670 break;
1671 case bulk_latency:
1672 if (bytes_perint <= adapter->eitr_high)
1673 retval = low_latency;
1674 break;
1675 }
1676
1677update_itr_done:
1678 return retval;
1679}
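/*
 * Worked example (traffic numbers hypothetical): at eitr = 20000 ints/s
 * the last timeslice was 1000000 / 20000 = 50 usec.  6000 bytes in that
 * slice gives bytes_perint = 120, i.e. roughly 120 MB/s; assuming the
 * driver's default thresholds of eitr_low = 10 and eitr_high = 20, that
 * promotes a low_latency vector to bulk_latency, while 500 bytes
 * (bytes_perint = 10) would drop it back to lowest_latency.
 */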
1680
Jesse Brandeburg509ee932009-03-13 22:13:28 +00001681/**
1682 * ixgbe_write_eitr - write EITR register in hardware specific way
Alexander Duyckfe49f042009-06-04 16:00:09 +00001683 * @q_vector: structure containing interrupt and ring information
Jesse Brandeburg509ee932009-03-13 22:13:28 +00001684 *
1685 * This function is made to be called by ethtool and by the driver
1686 * when it needs to update EITR registers at runtime. Hardware
1687 * specific quirks/differences are taken care of here.
1688 */
Alexander Duyckfe49f042009-06-04 16:00:09 +00001689void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
Jesse Brandeburg509ee932009-03-13 22:13:28 +00001690{
Alexander Duyckfe49f042009-06-04 16:00:09 +00001691 struct ixgbe_adapter *adapter = q_vector->adapter;
Jesse Brandeburg509ee932009-03-13 22:13:28 +00001692 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyckfe49f042009-06-04 16:00:09 +00001693 int v_idx = q_vector->v_idx;
1694 u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
1695
Alexander Duyckbd508172010-11-16 19:27:03 -08001696 switch (adapter->hw.mac.type) {
1697 case ixgbe_mac_82598EB:
Jesse Brandeburg509ee932009-03-13 22:13:28 +00001698 /* must write high and low 16 bits to reset counter */
1699 itr_reg |= (itr_reg << 16);
Alexander Duyckbd508172010-11-16 19:27:03 -08001700 break;
1701 case ixgbe_mac_82599EB:
Don Skidmoreb93a2222010-11-16 19:27:17 -08001702 case ixgbe_mac_X540:
Jesse Brandeburg509ee932009-03-13 22:13:28 +00001703 /*
Don Skidmoreb93a2222010-11-16 19:27:17 -08001704 * 82599 and X540 can support a value of zero, so allow it for
Jesse Brandeburgf8d1dca2010-04-27 01:37:20 +00001705		 * max interrupt rate, but there is an erratum where it
1706		 * cannot be zero with RSC
1707 */
1708 if (itr_reg == 8 &&
1709 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
1710 itr_reg = 0;
1711
1712 /*
Jesse Brandeburg509ee932009-03-13 22:13:28 +00001713		 * set the WDIS bit so the write does not clear the timer
1714		 * bits and cause an immediate assertion of the interrupt
1715 */
1716 itr_reg |= IXGBE_EITR_CNT_WDIS;
Alexander Duyckbd508172010-11-16 19:27:03 -08001717 break;
1718 default:
1719 break;
Jesse Brandeburg509ee932009-03-13 22:13:28 +00001720 }
1721 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
1722}
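/*
 * Rough sketch of the rate-to-register conversion (assuming
 * EITR_INTS_PER_SEC_TO_REG() is the usual 1000000000 / (rate * 256)
 * mapping): a request for 8000 ints/s yields an itr_reg of about 488,
 * while the driver's maximum rate yields itr_reg == 8, the value the
 * erratum check above rewrites to 0 when RSC is not enabled.
 */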
1723
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001724static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1725{
1726 struct ixgbe_adapter *adapter = q_vector->adapter;
Alexander Duyck125601b2010-11-16 19:27:08 -08001727 int i, r_idx;
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001728 u32 new_itr;
1729 u8 current_itr, ret_itr;
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001730
1731 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1732 for (i = 0; i < q_vector->txr_count; i++) {
Alexander Duyck125601b2010-11-16 19:27:08 -08001733 struct ixgbe_ring *tx_ring = adapter->tx_ring[r_idx];
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001734 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
Joe Perchese8e9f692010-09-07 21:34:53 +00001735 q_vector->tx_itr,
1736 tx_ring->total_packets,
1737 tx_ring->total_bytes);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001738 /* if the result for this queue would decrease interrupt
1739 * rate for this vector then use that result */
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07001740 q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
Joe Perchese8e9f692010-09-07 21:34:53 +00001741 q_vector->tx_itr - 1 : ret_itr);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001742 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
Joe Perchese8e9f692010-09-07 21:34:53 +00001743 r_idx + 1);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001744 }
1745
1746 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1747 for (i = 0; i < q_vector->rxr_count; i++) {
Alexander Duyck125601b2010-11-16 19:27:08 -08001748 struct ixgbe_ring *rx_ring = adapter->rx_ring[r_idx];
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001749 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
Joe Perchese8e9f692010-09-07 21:34:53 +00001750 q_vector->rx_itr,
1751 rx_ring->total_packets,
1752 rx_ring->total_bytes);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001753 /* if the result for this queue would decrease interrupt
1754 * rate for this vector then use that result */
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07001755 q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
Joe Perchese8e9f692010-09-07 21:34:53 +00001756 q_vector->rx_itr - 1 : ret_itr);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001757 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
Joe Perchese8e9f692010-09-07 21:34:53 +00001758 r_idx + 1);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001759 }
1760
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07001761 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001762
1763 switch (current_itr) {
1764 /* counts and packets in update_itr are dependent on these numbers */
1765 case lowest_latency:
1766 new_itr = 100000;
1767 break;
1768 case low_latency:
1769 new_itr = 20000; /* aka hwitr = ~200 */
1770 break;
1771 case bulk_latency:
1772 default:
1773 new_itr = 8000;
1774 break;
1775 }
1776
1777 if (new_itr != q_vector->eitr) {
Alexander Duyckfe49f042009-06-04 16:00:09 +00001778 /* do an exponential smoothing */
Alexander Duyck125601b2010-11-16 19:27:08 -08001779 new_itr = ((q_vector->eitr * 9) + new_itr)/10;
Jesse Brandeburg509ee932009-03-13 22:13:28 +00001780
1781		/* save the smoothed value for the next adjustment */
1782 q_vector->eitr = new_itr;
Alexander Duyckfe49f042009-06-04 16:00:09 +00001783
1784 ixgbe_write_eitr(q_vector);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001785 }
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001786}
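/*
 * The exponential smoothing above is a 90/10 weighted average.  With
 * hypothetical numbers: a vector running at eitr = 8000 whose queues now
 * suggest 20000 gets (8000 * 9 + 20000) / 10 = 9200 ints/s, so the rate
 * ramps over several intervals instead of oscillating with each burst.
 */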
1787
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07001788/**
1789 * ixgbe_check_overtemp_task - worker thread to check for over temperature
1790 * @work: pointer to work_struct containing our data
1791 **/
1792static void ixgbe_check_overtemp_task(struct work_struct *work)
1793{
1794 struct ixgbe_adapter *adapter = container_of(work,
Joe Perchese8e9f692010-09-07 21:34:53 +00001795 struct ixgbe_adapter,
1796 check_overtemp_task);
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07001797 struct ixgbe_hw *hw = &adapter->hw;
1798 u32 eicr = adapter->interrupt_event;
1799
Joe Perches7ca647b2010-09-07 21:35:40 +00001800 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
1801 return;
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07001802
Joe Perches7ca647b2010-09-07 21:35:40 +00001803 switch (hw->device_id) {
1804 case IXGBE_DEV_ID_82599_T3_LOM: {
1805 u32 autoneg;
1806 bool link_up = false;
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07001807
Joe Perches7ca647b2010-09-07 21:35:40 +00001808 if (hw->mac.ops.check_link)
1809 hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
1810
1811 if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
1812 (eicr & IXGBE_EICR_LSC))
1813 /* Check if this is due to overtemp */
1814 if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP)
1815 break;
1816 return;
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07001817 }
Joe Perches7ca647b2010-09-07 21:35:40 +00001818 default:
1819 if (!(eicr & IXGBE_EICR_GPI_SDP0))
1820 return;
1821 break;
1822 }
1823 e_crit(drv,
1824	       "Network adapter has been stopped because it has overheated. "
1825 "Restart the computer. If the problem persists, "
1826 "power off the system and replace the adapter\n");
1827 /* write to clear the interrupt */
1828 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07001829}
1830
Jesse Brandeburg0befdb32008-10-31 00:46:40 -07001831static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
1832{
1833 struct ixgbe_hw *hw = &adapter->hw;
1834
1835 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
1836 (eicr & IXGBE_EICR_GPI_SDP1)) {
Emil Tantilov396e7992010-07-01 20:05:12 +00001837 e_crit(probe, "Fan has stopped, replace the adapter\n");
Jesse Brandeburg0befdb32008-10-31 00:46:40 -07001838 /* write to clear the interrupt */
1839 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1840 }
1841}
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07001842
PJ Waskiewicze8e26352009-02-27 15:45:05 +00001843static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
1844{
1845 struct ixgbe_hw *hw = &adapter->hw;
1846
Alexander Duyck73c4b7c2010-11-16 19:26:57 -08001847 if (eicr & IXGBE_EICR_GPI_SDP2) {
1848 /* Clear the interrupt */
1849 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
1850 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1851 schedule_work(&adapter->sfp_config_module_task);
1852 }
1853
PJ Waskiewicze8e26352009-02-27 15:45:05 +00001854 if (eicr & IXGBE_EICR_GPI_SDP1) {
1855 /* Clear the interrupt */
1856 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
Alexander Duyck73c4b7c2010-11-16 19:26:57 -08001857 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1858 schedule_work(&adapter->multispeed_fiber_task);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00001859 }
1860}
1861
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07001862static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
1863{
1864 struct ixgbe_hw *hw = &adapter->hw;
1865
1866 adapter->lsc_int++;
1867 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1868 adapter->link_check_timeout = jiffies;
1869 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1870 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
Nelson, Shannon8a0717f2009-11-12 18:47:11 +00001871 IXGBE_WRITE_FLUSH(hw);
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07001872 schedule_work(&adapter->watchdog_task);
1873 }
1874}
1875
Auke Kok9a799d72007-09-15 14:07:45 -07001876static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
1877{
1878 struct net_device *netdev = data;
1879 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1880 struct ixgbe_hw *hw = &adapter->hw;
Don Skidmore54037502009-02-21 15:42:56 -08001881 u32 eicr;
1882
1883 /*
1884	 * Workaround for silicon errata. Use clear-by-write instead
1885	 * of clear-by-read. Reading with EICS will return the
1886	 * interrupt causes without clearing, which will later be done
1887	 * with the write to EICR.
1888 */
1889 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1890 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
Auke Kok9a799d72007-09-15 14:07:45 -07001891
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07001892 if (eicr & IXGBE_EICR_LSC)
1893 ixgbe_check_lsc(adapter);
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08001894
Greg Rose1cdd1ec2010-01-09 02:26:46 +00001895 if (eicr & IXGBE_EICR_MAILBOX)
1896 ixgbe_msg_task(adapter);
1897
Alexander Duyckbd508172010-11-16 19:27:03 -08001898 switch (hw->mac.type) {
1899 case ixgbe_mac_82599EB:
Don Skidmoreb93a2222010-11-16 19:27:17 -08001900 case ixgbe_mac_X540:
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00001901 /* Handle Flow Director Full threshold interrupt */
1902 if (eicr & IXGBE_EICR_FLOW_DIR) {
1903 int i;
1904 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
1905 /* Disable transmits before FDIR Re-initialization */
1906 netif_tx_stop_all_queues(netdev);
1907 for (i = 0; i < adapter->num_tx_queues; i++) {
1908 struct ixgbe_ring *tx_ring =
Joe Perchese8e9f692010-09-07 21:34:53 +00001909 adapter->tx_ring[i];
Alexander Duyck7d637bc2010-11-16 19:26:56 -08001910 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
1911 &tx_ring->state))
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00001912 schedule_work(&adapter->fdir_reinit_task);
1913 }
1914 }
Alexander Duyckbd508172010-11-16 19:27:03 -08001915 ixgbe_check_sfp_event(adapter, eicr);
1916 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
1917 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
1918 adapter->interrupt_event = eicr;
1919 schedule_work(&adapter->check_overtemp_task);
1920 }
1921 break;
1922 default:
1923 break;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00001924 }
Alexander Duyckbd508172010-11-16 19:27:03 -08001925
1926 ixgbe_check_fan_failure(adapter, eicr);
1927
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08001928 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1929 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
Auke Kok9a799d72007-09-15 14:07:45 -07001930
1931 return IRQ_HANDLED;
1932}
1933
Alexander Duyckfe49f042009-06-04 16:00:09 +00001934static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
1935 u64 qmask)
1936{
1937 u32 mask;
Alexander Duyckbd508172010-11-16 19:27:03 -08001938 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyckfe49f042009-06-04 16:00:09 +00001939
Alexander Duyckbd508172010-11-16 19:27:03 -08001940 switch (hw->mac.type) {
1941 case ixgbe_mac_82598EB:
Alexander Duyckfe49f042009-06-04 16:00:09 +00001942 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
Alexander Duyckbd508172010-11-16 19:27:03 -08001943 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1944 break;
1945 case ixgbe_mac_82599EB:
Don Skidmoreb93a2222010-11-16 19:27:17 -08001946 case ixgbe_mac_X540:
Alexander Duyckfe49f042009-06-04 16:00:09 +00001947 mask = (qmask & 0xFFFFFFFF);
Alexander Duyckbd508172010-11-16 19:27:03 -08001948 if (mask)
1949 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
Alexander Duyckfe49f042009-06-04 16:00:09 +00001950 mask = (qmask >> 32);
Alexander Duyckbd508172010-11-16 19:27:03 -08001951 if (mask)
1952 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1953 break;
1954 default:
1955 break;
Alexander Duyckfe49f042009-06-04 16:00:09 +00001956 }
1957 /* skip the flush */
1958}
1959
1960static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
Joe Perchese8e9f692010-09-07 21:34:53 +00001961 u64 qmask)
Alexander Duyckfe49f042009-06-04 16:00:09 +00001962{
1963 u32 mask;
Alexander Duyckbd508172010-11-16 19:27:03 -08001964 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyckfe49f042009-06-04 16:00:09 +00001965
Alexander Duyckbd508172010-11-16 19:27:03 -08001966 switch (hw->mac.type) {
1967 case ixgbe_mac_82598EB:
Alexander Duyckfe49f042009-06-04 16:00:09 +00001968 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
Alexander Duyckbd508172010-11-16 19:27:03 -08001969 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1970 break;
1971 case ixgbe_mac_82599EB:
Don Skidmoreb93a2222010-11-16 19:27:17 -08001972 case ixgbe_mac_X540:
Alexander Duyckfe49f042009-06-04 16:00:09 +00001973 mask = (qmask & 0xFFFFFFFF);
Alexander Duyckbd508172010-11-16 19:27:03 -08001974 if (mask)
1975 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
Alexander Duyckfe49f042009-06-04 16:00:09 +00001976 mask = (qmask >> 32);
Alexander Duyckbd508172010-11-16 19:27:03 -08001977 if (mask)
1978 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1979 break;
1980 default:
1981 break;
Alexander Duyckfe49f042009-06-04 16:00:09 +00001982 }
1983 /* skip the flush */
1984}
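/*
 * Example of the 64-bit qmask split used by the two helpers above
 * (vector index hypothetical): q_vector 35 contributes bit 35, so the
 * low word (qmask & 0xFFFFFFFF) is 0 and its write is skipped, while
 * (qmask >> 32) sets bit 3 in EIMS_EX(1)/EIMC_EX(1).  82598 has only
 * the single IXGBE_EIMS_RTX_QUEUE-masked register, hence its one-write
 * path.
 */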
1985
Auke Kok9a799d72007-09-15 14:07:45 -07001986static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
1987{
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001988 struct ixgbe_q_vector *q_vector = data;
1989 struct ixgbe_adapter *adapter = q_vector->adapter;
Jesse Brandeburg3a581072008-08-26 04:27:08 -07001990 struct ixgbe_ring *tx_ring;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001991 int i, r_idx;
Auke Kok9a799d72007-09-15 14:07:45 -07001992
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001993 if (!q_vector->txr_count)
1994 return IRQ_HANDLED;
1995
1996 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1997 for (i = 0; i < q_vector->txr_count; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00001998 tx_ring = adapter->tx_ring[r_idx];
Jesse Brandeburg3a581072008-08-26 04:27:08 -07001999 tx_ring->total_bytes = 0;
2000 tx_ring->total_packets = 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002001 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
Joe Perchese8e9f692010-09-07 21:34:53 +00002002 r_idx + 1);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002003 }
2004
Jesse Brandeburg9b471442009-12-03 11:33:54 +00002005 /* EIAM disabled interrupts (on this vector) for us */
Alexander Duyck91281fd2009-06-04 16:00:27 +00002006 napi_schedule(&q_vector->napi);
2007
Auke Kok9a799d72007-09-15 14:07:45 -07002008 return IRQ_HANDLED;
2009}
2010
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002011/**
2012 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
2013 * @irq: unused
2014 * @data: pointer to our q_vector struct for this interrupt vector
2015 **/
Auke Kok9a799d72007-09-15 14:07:45 -07002016static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
2017{
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002018 struct ixgbe_q_vector *q_vector = data;
2019 struct ixgbe_adapter *adapter = q_vector->adapter;
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002020 struct ixgbe_ring *rx_ring;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002021 int r_idx;
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07002022 int i;
Auke Kok9a799d72007-09-15 14:07:45 -07002023
Alexander Duyck33cf09c2010-11-16 19:26:55 -08002024#ifdef CONFIG_IXGBE_DCA
2025 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2026 ixgbe_update_dca(q_vector);
2027#endif
2028
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002029 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
Alexander Duyck33cf09c2010-11-16 19:26:55 -08002030 for (i = 0; i < q_vector->rxr_count; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00002031 rx_ring = adapter->rx_ring[r_idx];
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07002032 rx_ring->total_bytes = 0;
2033 rx_ring->total_packets = 0;
2034 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
Joe Perchese8e9f692010-09-07 21:34:53 +00002035 r_idx + 1);
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07002036 }
2037
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002038 if (!q_vector->rxr_count)
2039 return IRQ_HANDLED;
2040
Jesse Brandeburg9b471442009-12-03 11:33:54 +00002041 /* EIAM disabled interrupts (on this vector) for us */
Ben Hutchings288379f2009-01-19 16:43:59 -08002042 napi_schedule(&q_vector->napi);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002043
Auke Kok9a799d72007-09-15 14:07:45 -07002044 return IRQ_HANDLED;
2045}
2046
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002047static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
2048{
Alexander Duyck91281fd2009-06-04 16:00:27 +00002049 struct ixgbe_q_vector *q_vector = data;
2050 struct ixgbe_adapter *adapter = q_vector->adapter;
2051 struct ixgbe_ring *ring;
2052 int r_idx;
2053 int i;
2054
2055 if (!q_vector->txr_count && !q_vector->rxr_count)
2056 return IRQ_HANDLED;
2057
2058 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
2059 for (i = 0; i < q_vector->txr_count; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00002060 ring = adapter->tx_ring[r_idx];
Alexander Duyck91281fd2009-06-04 16:00:27 +00002061 ring->total_bytes = 0;
2062 ring->total_packets = 0;
2063 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
Joe Perchese8e9f692010-09-07 21:34:53 +00002064 r_idx + 1);
Alexander Duyck91281fd2009-06-04 16:00:27 +00002065 }
2066
2067 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
2068 for (i = 0; i < q_vector->rxr_count; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00002069 ring = adapter->rx_ring[r_idx];
Alexander Duyck91281fd2009-06-04 16:00:27 +00002070 ring->total_bytes = 0;
2071 ring->total_packets = 0;
2072 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
Joe Perchese8e9f692010-09-07 21:34:53 +00002073 r_idx + 1);
Alexander Duyck91281fd2009-06-04 16:00:27 +00002074 }
2075
Jesse Brandeburg9b471442009-12-03 11:33:54 +00002076 /* EIAM disabled interrupts (on this vector) for us */
Alexander Duyck91281fd2009-06-04 16:00:27 +00002077 napi_schedule(&q_vector->napi);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002078
2079 return IRQ_HANDLED;
2080}
2081
2082/**
2083 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
2084 * @napi: napi struct with our devices info in it
2085 * @budget: amount of work driver is allowed to do this pass, in packets
2086 *
Jesse Brandeburgf0848272008-09-11 19:59:42 -07002087 * This function is optimized for cleaning one queue only on a single
2088 * q_vector!!!
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002089 **/
Auke Kok9a799d72007-09-15 14:07:45 -07002090static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
2091{
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002092 struct ixgbe_q_vector *q_vector =
Joe Perchese8e9f692010-09-07 21:34:53 +00002093 container_of(napi, struct ixgbe_q_vector, napi);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002094 struct ixgbe_adapter *adapter = q_vector->adapter;
Jesse Brandeburgf0848272008-09-11 19:59:42 -07002095 struct ixgbe_ring *rx_ring = NULL;
Auke Kok9a799d72007-09-15 14:07:45 -07002096 int work_done = 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002097 long r_idx;
Auke Kok9a799d72007-09-15 14:07:45 -07002098
Jeff Garzik5dd2d332008-10-16 05:09:31 -04002099#ifdef CONFIG_IXGBE_DCA
Jeb Cramerbd0362d2008-03-03 15:04:02 -08002100 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
Alexander Duyck33cf09c2010-11-16 19:26:55 -08002101 ixgbe_update_dca(q_vector);
Jeb Cramerbd0362d2008-03-03 15:04:02 -08002102#endif
Auke Kok9a799d72007-09-15 14:07:45 -07002103
Alexander Duyck33cf09c2010-11-16 19:26:55 -08002104 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
2105 rx_ring = adapter->rx_ring[r_idx];
2106
Herbert Xu78b6f4c2009-01-18 21:49:45 -08002107 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
Auke Kok9a799d72007-09-15 14:07:45 -07002108
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002109 /* If all Rx work done, exit the polling mode */
2110 if (work_done < budget) {
Ben Hutchings288379f2009-01-19 16:43:59 -08002111 napi_complete(napi);
Nelson, Shannonf7554a22009-09-18 09:46:06 +00002112 if (adapter->rx_itr_setting & 1)
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08002113 ixgbe_set_itr_msix(q_vector);
Auke Kok9a799d72007-09-15 14:07:45 -07002114 if (!test_bit(__IXGBE_DOWN, &adapter->state))
Alexander Duyckfe49f042009-06-04 16:00:09 +00002115 ixgbe_irq_enable_queues(adapter,
Joe Perchese8e9f692010-09-07 21:34:53 +00002116 ((u64)1 << q_vector->v_idx));
Auke Kok9a799d72007-09-15 14:07:45 -07002117 }
2118
2119 return work_done;
2120}
2121
Jesse Brandeburgf0848272008-09-11 19:59:42 -07002122/**
Alexander Duyck91281fd2009-06-04 16:00:27 +00002123 * ixgbe_clean_rxtx_many - msix (aka one shot) rx/tx clean routine
Jesse Brandeburgf0848272008-09-11 19:59:42 -07002124 * @napi: napi struct with our devices info in it
2125 * @budget: amount of work driver is allowed to do this pass, in packets
2126 *
2127 * This function will clean more than one rx/tx queue associated with a
2128 * q_vector.
2129 **/
Alexander Duyck91281fd2009-06-04 16:00:27 +00002130static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
Jesse Brandeburgf0848272008-09-11 19:59:42 -07002131{
2132 struct ixgbe_q_vector *q_vector =
Joe Perchese8e9f692010-09-07 21:34:53 +00002133 container_of(napi, struct ixgbe_q_vector, napi);
Jesse Brandeburgf0848272008-09-11 19:59:42 -07002134 struct ixgbe_adapter *adapter = q_vector->adapter;
Alexander Duyck91281fd2009-06-04 16:00:27 +00002135 struct ixgbe_ring *ring = NULL;
Jesse Brandeburgf0848272008-09-11 19:59:42 -07002136 int work_done = 0, i;
2137 long r_idx;
Alexander Duyck91281fd2009-06-04 16:00:27 +00002138 bool tx_clean_complete = true;
2139
Alexander Duyck33cf09c2010-11-16 19:26:55 -08002140#ifdef CONFIG_IXGBE_DCA
2141 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2142 ixgbe_update_dca(q_vector);
2143#endif
2144
Alexander Duyck91281fd2009-06-04 16:00:27 +00002145 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
2146 for (i = 0; i < q_vector->txr_count; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00002147 ring = adapter->tx_ring[r_idx];
Alexander Duyck91281fd2009-06-04 16:00:27 +00002148 tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
2149 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
Joe Perchese8e9f692010-09-07 21:34:53 +00002150 r_idx + 1);
Alexander Duyck91281fd2009-06-04 16:00:27 +00002151 }
Jesse Brandeburgf0848272008-09-11 19:59:42 -07002152
2153 /* attempt to distribute budget to each queue fairly, but don't allow
2154 * the budget to go below 1 because we'll exit polling */
2155 budget /= (q_vector->rxr_count ?: 1);
2156 budget = max(budget, 1);
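	/*
	 * e.g. (numbers hypothetical) a NAPI budget of 64 over three rx
	 * rings gives 64 / 3 = 21 packets per ring; the ?: keeps the
	 * divisor nonzero for rx-less vectors and max() keeps at least
	 * one packet of budget per ring.
	 */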
2157 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
2158 for (i = 0; i < q_vector->rxr_count; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00002159 ring = adapter->rx_ring[r_idx];
Alexander Duyck91281fd2009-06-04 16:00:27 +00002160 ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
Jesse Brandeburgf0848272008-09-11 19:59:42 -07002161 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
Joe Perchese8e9f692010-09-07 21:34:53 +00002162 r_idx + 1);
Jesse Brandeburgf0848272008-09-11 19:59:42 -07002163 }
2164
2165 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00002166 ring = adapter->rx_ring[r_idx];
Jesse Brandeburgf0848272008-09-11 19:59:42 -07002167 /* If all Rx work done, exit the polling mode */
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002168 if (work_done < budget) {
Ben Hutchings288379f2009-01-19 16:43:59 -08002169 napi_complete(napi);
Nelson, Shannonf7554a22009-09-18 09:46:06 +00002170 if (adapter->rx_itr_setting & 1)
Jesse Brandeburgf0848272008-09-11 19:59:42 -07002171 ixgbe_set_itr_msix(q_vector);
2172 if (!test_bit(__IXGBE_DOWN, &adapter->state))
Alexander Duyckfe49f042009-06-04 16:00:09 +00002173 ixgbe_irq_enable_queues(adapter,
Joe Perchese8e9f692010-09-07 21:34:53 +00002174 ((u64)1 << q_vector->v_idx));
Jesse Brandeburgf0848272008-09-11 19:59:42 -07002175 return 0;
2176 }
2177
2178 return work_done;
2179}
Alexander Duyck91281fd2009-06-04 16:00:27 +00002180
2181/**
2182 * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
2183 * @napi: napi struct with our devices info in it
2184 * @budget: amount of work driver is allowed to do this pass, in packets
2185 *
2186 * This function is optimized for cleaning one queue only on a single
2187 * q_vector!!!
2188 **/
2189static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
2190{
2191 struct ixgbe_q_vector *q_vector =
Joe Perchese8e9f692010-09-07 21:34:53 +00002192 container_of(napi, struct ixgbe_q_vector, napi);
Alexander Duyck91281fd2009-06-04 16:00:27 +00002193 struct ixgbe_adapter *adapter = q_vector->adapter;
2194 struct ixgbe_ring *tx_ring = NULL;
2195 int work_done = 0;
2196 long r_idx;
2197
Alexander Duyck91281fd2009-06-04 16:00:27 +00002198#ifdef CONFIG_IXGBE_DCA
2199 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
Alexander Duyck33cf09c2010-11-16 19:26:55 -08002200 ixgbe_update_dca(q_vector);
Alexander Duyck91281fd2009-06-04 16:00:27 +00002201#endif
2202
Alexander Duyck33cf09c2010-11-16 19:26:55 -08002203 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
2204 tx_ring = adapter->tx_ring[r_idx];
2205
Alexander Duyck91281fd2009-06-04 16:00:27 +00002206 if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
2207 work_done = budget;
2208
Nelson, Shannonf7554a22009-09-18 09:46:06 +00002209 /* If all Tx work done, exit the polling mode */
Alexander Duyck91281fd2009-06-04 16:00:27 +00002210 if (work_done < budget) {
2211 napi_complete(napi);
Nelson, Shannonf7554a22009-09-18 09:46:06 +00002212 if (adapter->tx_itr_setting & 1)
Alexander Duyck91281fd2009-06-04 16:00:27 +00002213 ixgbe_set_itr_msix(q_vector);
2214 if (!test_bit(__IXGBE_DOWN, &adapter->state))
Joe Perchese8e9f692010-09-07 21:34:53 +00002215 ixgbe_irq_enable_queues(adapter,
2216 ((u64)1 << q_vector->v_idx));
Alexander Duyck91281fd2009-06-04 16:00:27 +00002217 }
2218
2219 return work_done;
2220}
2221
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002222static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
Joe Perchese8e9f692010-09-07 21:34:53 +00002223 int r_idx)
Auke Kok9a799d72007-09-15 14:07:45 -07002224{
Alexander Duyck7a921c92009-05-06 10:43:28 +00002225 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
Alexander Duyck22745432010-11-16 19:27:10 -08002226 struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
Alexander Duyck7a921c92009-05-06 10:43:28 +00002227
2228 set_bit(r_idx, q_vector->rxr_idx);
2229 q_vector->rxr_count++;
Alexander Duyck22745432010-11-16 19:27:10 -08002230 rx_ring->q_vector = q_vector;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002231}
Auke Kok9a799d72007-09-15 14:07:45 -07002232
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002233static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
Joe Perchese8e9f692010-09-07 21:34:53 +00002234 int t_idx)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002235{
Alexander Duyck7a921c92009-05-06 10:43:28 +00002236 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
Alexander Duyck22745432010-11-16 19:27:10 -08002237 struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
Alexander Duyck7a921c92009-05-06 10:43:28 +00002238
2239 set_bit(t_idx, q_vector->txr_idx);
2240 q_vector->txr_count++;
Alexander Duyck22745432010-11-16 19:27:10 -08002241 tx_ring->q_vector = q_vector;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002242}
Auke Kok9a799d72007-09-15 14:07:45 -07002243
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002244/**
2245 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
2246 * @adapter: board private structure to initialize
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002247 *
2248 * This function maps descriptor rings to the queue-specific vectors
2249 * we were allotted through the MSI-X enabling code. Ideally, we'd have
2250 * one vector per ring/queue, but on a constrained vector budget, we
2251 * group the rings as "efficiently" as possible. You would add new
2252 * mapping configurations in here.
2253 **/
Alexander Duyckd0759eb2010-11-16 19:27:09 -08002254static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002255{
Alexander Duyckd0759eb2010-11-16 19:27:09 -08002256 int q_vectors;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002257 int v_start = 0;
2258 int rxr_idx = 0, txr_idx = 0;
2259 int rxr_remaining = adapter->num_rx_queues;
2260 int txr_remaining = adapter->num_tx_queues;
2261 int i, j;
2262 int rqpv, tqpv;
2263 int err = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07002264
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002265 /* No mapping required if MSI-X is disabled. */
2266 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
Auke Kok9a799d72007-09-15 14:07:45 -07002267 goto out;
2268
Alexander Duyckd0759eb2010-11-16 19:27:09 -08002269 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2270
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002271 /*
2272 * The ideal configuration...
2273 * We have enough vectors to map one per queue.
2274 */
Alexander Duyckd0759eb2010-11-16 19:27:09 -08002275 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002276 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
2277 map_vector_to_rxq(adapter, v_start, rxr_idx);
2278
2279 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
2280 map_vector_to_txq(adapter, v_start, txr_idx);
2281
2282 goto out;
2283 }
2284
2285 /*
2286 * If we don't have enough vectors for a 1-to-1
2287 * mapping, we'll have to group them so there are
2288 * multiple queues per vector.
2289 */
2290 /* Re-adjusting *qpv takes care of the remainder. */
Alexander Duyckd0759eb2010-11-16 19:27:09 -08002291 for (i = v_start; i < q_vectors; i++) {
2292 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002293 for (j = 0; j < rqpv; j++) {
2294 map_vector_to_rxq(adapter, i, rxr_idx);
2295 rxr_idx++;
2296 rxr_remaining--;
Auke Kok9a799d72007-09-15 14:07:45 -07002297 }
Alexander Duyckd0759eb2010-11-16 19:27:09 -08002298 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002299 for (j = 0; j < tqpv; j++) {
2300 map_vector_to_txq(adapter, i, txr_idx);
2301 txr_idx++;
2302 txr_remaining--;
Auke Kok9a799d72007-09-15 14:07:45 -07002303 }
Auke Kok9a799d72007-09-15 14:07:45 -07002304 }
Auke Kok9a799d72007-09-15 14:07:45 -07002305out:
Auke Kok9a799d72007-09-15 14:07:45 -07002306 return err;
2307}
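/*
 * Worked example of the constrained-budget path (queue counts
 * hypothetical): with 10 Rx queues and 4 q_vectors the DIV_ROUND_UP()
 * loop assigns ceil(10/4) = 3 queues to vector 0, ceil(7/3) = 3 to
 * vector 1, ceil(4/2) = 2 to vector 2 and ceil(2/1) = 2 to vector 3;
 * recomputing *qpv from what remains is what absorbs the remainder in
 * the earliest vectors.
 */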
2308
2309/**
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002310 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
2311 * @adapter: board private structure
2312 *
2313 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
2314 * interrupts from the kernel.
2315 **/
2316static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2317{
2318 struct net_device *netdev = adapter->netdev;
2319 irqreturn_t (*handler)(int, void *);
2320 int i, vector, q_vectors, err;
Joe Perchese8e9f692010-09-07 21:34:53 +00002321 int ri = 0, ti = 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002322
2323 /* Decrement for Other and TCP Timer vectors */
2324 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2325
Alexander Duyckd0759eb2010-11-16 19:27:09 -08002326 err = ixgbe_map_rings_to_vectors(adapter);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002327 if (err)
Alexander Duyckd0759eb2010-11-16 19:27:09 -08002328 return err;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002329
Alexander Duyckd0759eb2010-11-16 19:27:09 -08002330#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
2331 ? &ixgbe_msix_clean_many : \
2332 (_v)->rxr_count ? &ixgbe_msix_clean_rx : \
2333 (_v)->txr_count ? &ixgbe_msix_clean_tx : \
2334 NULL)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002335 for (vector = 0; vector < q_vectors; vector++) {
Alexander Duyckd0759eb2010-11-16 19:27:09 -08002336 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2337 handler = SET_HANDLER(q_vector);
Robert Olssoncb13fc22008-11-25 16:43:52 -08002338
Joe Perchese8e9f692010-09-07 21:34:53 +00002339 if (handler == &ixgbe_msix_clean_rx) {
Alexander Duyckd0759eb2010-11-16 19:27:09 -08002340 sprintf(q_vector->name, "%s-%s-%d",
Robert Olssoncb13fc22008-11-25 16:43:52 -08002341 netdev->name, "rx", ri++);
Joe Perchese8e9f692010-09-07 21:34:53 +00002342 } else if (handler == &ixgbe_msix_clean_tx) {
Alexander Duyckd0759eb2010-11-16 19:27:09 -08002343 sprintf(q_vector->name, "%s-%s-%d",
Robert Olssoncb13fc22008-11-25 16:43:52 -08002344 netdev->name, "tx", ti++);
Alexander Duyckd0759eb2010-11-16 19:27:09 -08002345 } else if (handler == &ixgbe_msix_clean_many) {
2346 sprintf(q_vector->name, "%s-%s-%d",
Alexander Duyck32aa77a2010-11-16 19:26:59 -08002347 netdev->name, "TxRx", ri++);
2348 ti++;
Alexander Duyckd0759eb2010-11-16 19:27:09 -08002349 } else {
2350 /* skip this unused q_vector */
2351 continue;
Alexander Duyck32aa77a2010-11-16 19:26:59 -08002352 }
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002353 err = request_irq(adapter->msix_entries[vector].vector,
Alexander Duyckd0759eb2010-11-16 19:27:09 -08002354 handler, 0, q_vector->name,
2355 q_vector);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002356 if (err) {
Emil Tantilov396e7992010-07-01 20:05:12 +00002357			e_err(probe, "request_irq failed for MSIX interrupt, "
Emil Tantilov849c4542010-06-03 16:53:41 +00002358			      "Error: %d\n", err);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002359 goto free_queue_irqs;
2360 }
2361 }
2362
Alexander Duyckd0759eb2010-11-16 19:27:09 -08002363 sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002364 err = request_irq(adapter->msix_entries[vector].vector,
Alexander Duyckd0759eb2010-11-16 19:27:09 -08002365 ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002366 if (err) {
Emil Tantilov396e7992010-07-01 20:05:12 +00002367 e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002368 goto free_queue_irqs;
2369 }
2370
2371 return 0;
2372
2373free_queue_irqs:
2374 for (i = vector - 1; i >= 0; i--)
2375 free_irq(adapter->msix_entries[--vector].vector,
Joe Perchese8e9f692010-09-07 21:34:53 +00002376 adapter->q_vector[i]);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002377 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2378 pci_disable_msix(adapter->pdev);
2379 kfree(adapter->msix_entries);
2380 adapter->msix_entries = NULL;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002381 return err;
2382}
2383
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08002384static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
2385{
Alexander Duyck7a921c92009-05-06 10:43:28 +00002386 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00002387 struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
2388 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
Alexander Duyck125601b2010-11-16 19:27:08 -08002389 u32 new_itr = q_vector->eitr;
2390 u8 current_itr;
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08002391
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07002392 q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
Joe Perchese8e9f692010-09-07 21:34:53 +00002393 q_vector->tx_itr,
2394 tx_ring->total_packets,
2395 tx_ring->total_bytes);
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07002396 q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
Joe Perchese8e9f692010-09-07 21:34:53 +00002397 q_vector->rx_itr,
2398 rx_ring->total_packets,
2399 rx_ring->total_bytes);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08002400
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07002401 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08002402
2403 switch (current_itr) {
2404 /* counts and packets in update_itr are dependent on these numbers */
2405 case lowest_latency:
2406 new_itr = 100000;
2407 break;
2408 case low_latency:
2409 new_itr = 20000; /* aka hwitr = ~200 */
2410 break;
2411 case bulk_latency:
2412 new_itr = 8000;
2413 break;
2414 default:
2415 break;
2416 }
2417
2418 if (new_itr != q_vector->eitr) {
Alexander Duyckfe49f042009-06-04 16:00:09 +00002419 /* do an exponential smoothing */
Alexander Duyck125601b2010-11-16 19:27:08 -08002420 new_itr = ((q_vector->eitr * 9) + new_itr)/10;
Jesse Brandeburg509ee932009-03-13 22:13:28 +00002421
Alexander Duyck125601b2010-11-16 19:27:08 -08002422 /* save the algorithm value here */
Jesse Brandeburg509ee932009-03-13 22:13:28 +00002423 q_vector->eitr = new_itr;
Alexander Duyckfe49f042009-06-04 16:00:09 +00002424
2425 ixgbe_write_eitr(q_vector);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08002426 }
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08002427}
2428
Alexey Dobriyan79aefa42008-11-19 14:17:02 -08002429/**
Alexey Dobriyan79aefa42008-11-19 14:17:02 -08002430 * ixgbe_irq_enable - Enable default interrupt generation settings
2431 * @adapter: board private structure
2432 **/
Emil Tantilov6af3b9e2010-09-29 21:35:23 +00002433static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2434 bool flush)
Alexey Dobriyan79aefa42008-11-19 14:17:02 -08002435{
2436 u32 mask;
Nelson, Shannon835462f2009-04-27 22:42:54 +00002437
2438 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07002439 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
2440 mask |= IXGBE_EIMS_GPI_SDP0;
David S. Miller6ab33d52008-11-20 16:44:00 -08002441 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
2442 mask |= IXGBE_EIMS_GPI_SDP1;
Alexander Duyckbd508172010-11-16 19:27:03 -08002443 switch (adapter->hw.mac.type) {
2444 case ixgbe_mac_82599EB:
Don Skidmoreb93a2222010-11-16 19:27:17 -08002445 case ixgbe_mac_X540:
Jesse Brandeburg2a41ff82009-03-13 22:14:30 +00002446 mask |= IXGBE_EIMS_ECC;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002447 mask |= IXGBE_EIMS_GPI_SDP1;
2448 mask |= IXGBE_EIMS_GPI_SDP2;
Greg Rose1cdd1ec2010-01-09 02:26:46 +00002449 if (adapter->num_vfs)
2450 mask |= IXGBE_EIMS_MAILBOX;
Alexander Duyckbd508172010-11-16 19:27:03 -08002451 break;
2452 default:
2453 break;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002454 }
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00002455 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
2456 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
2457 mask |= IXGBE_EIMS_FLOW_DIR;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002458
Alexey Dobriyan79aefa42008-11-19 14:17:02 -08002459 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
Emil Tantilov6af3b9e2010-09-29 21:35:23 +00002460 if (queues)
2461 ixgbe_irq_enable_queues(adapter, ~0);
2462 if (flush)
2463 IXGBE_WRITE_FLUSH(&adapter->hw);
Greg Rose1cdd1ec2010-01-09 02:26:46 +00002464
2465 if (adapter->num_vfs > 32) {
2466 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
2467 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
2468 }
Alexey Dobriyan79aefa42008-11-19 14:17:02 -08002469}
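/*
 * Note on the EITRSEL write above (the register semantics here are an
 * assumption; the bit math itself is from the code): for more than 32
 * VFs the expression builds a mask with one bit per VF beyond the
 * first 32, e.g. num_vfs == 40 gives (1 << (40 - 32)) - 1 == 0xFF,
 * i.e. bits 0-7 set for VFs 32-39.
 */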
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002470
2471/**
2472 * ixgbe_intr - legacy mode Interrupt Handler
Auke Kok9a799d72007-09-15 14:07:45 -07002473 * @irq: interrupt number
2474 * @data: pointer to a network interface device structure
Auke Kok9a799d72007-09-15 14:07:45 -07002475 **/
2476static irqreturn_t ixgbe_intr(int irq, void *data)
2477{
2478 struct net_device *netdev = data;
2479 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2480 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck7a921c92009-05-06 10:43:28 +00002481 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9a799d72007-09-15 14:07:45 -07002482 u32 eicr;
2483
Don Skidmore54037502009-02-21 15:42:56 -08002484 /*
Emil Tantilov6af3b9e2010-09-29 21:35:23 +00002485 * Workaround for silicon errata on 82598. Mask the interrupts
Don Skidmore54037502009-02-21 15:42:56 -08002486 * before the read of EICR.
2487 */
2488 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
2489
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002490	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read,
2491	 * therefore no explicit interrupt disable is necessary */
2492 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
Jesse Brandeburgf47cf662008-09-11 19:56:14 -07002493 if (!eicr) {
Emil Tantilov6af3b9e2010-09-29 21:35:23 +00002494 /*
2495 * shared interrupt alert!
Jesse Brandeburgf47cf662008-09-11 19:56:14 -07002496 * make sure interrupts are enabled because the read will
Emil Tantilov6af3b9e2010-09-29 21:35:23 +00002497	 * have disabled interrupts due to EIAM.
2498	 * Finish the workaround of silicon errata on 82598: unmask
2499 * the interrupt that we masked before the EICR read.
2500 */
2501 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2502 ixgbe_irq_enable(adapter, true, true);
Auke Kok9a799d72007-09-15 14:07:45 -07002503 return IRQ_NONE; /* Not our interrupt */
Jesse Brandeburgf47cf662008-09-11 19:56:14 -07002504 }
Auke Kok9a799d72007-09-15 14:07:45 -07002505
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07002506 if (eicr & IXGBE_EICR_LSC)
2507 ixgbe_check_lsc(adapter);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002508
Alexander Duyckbd508172010-11-16 19:27:03 -08002509 switch (hw->mac.type) {
2510 case ixgbe_mac_82599EB:
Don Skidmoreb93a2222010-11-16 19:27:17 -08002511 case ixgbe_mac_X540:
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002512 ixgbe_check_sfp_event(adapter, eicr);
Alexander Duyckbd508172010-11-16 19:27:03 -08002513 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2514 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
2515 adapter->interrupt_event = eicr;
2516 schedule_work(&adapter->check_overtemp_task);
2517 }
2518 break;
2519 default:
2520 break;
2521 }
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002522
Jesse Brandeburg0befdb32008-10-31 00:46:40 -07002523 ixgbe_check_fan_failure(adapter, eicr);
2524
Alexander Duyck7a921c92009-05-06 10:43:28 +00002525 if (napi_schedule_prep(&(q_vector->napi))) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00002526 adapter->tx_ring[0]->total_packets = 0;
2527 adapter->tx_ring[0]->total_bytes = 0;
2528 adapter->rx_ring[0]->total_packets = 0;
2529 adapter->rx_ring[0]->total_bytes = 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002530 /* would disable interrupts here but EIAM disabled it */
Alexander Duyck7a921c92009-05-06 10:43:28 +00002531 __napi_schedule(&(q_vector->napi));
Auke Kok9a799d72007-09-15 14:07:45 -07002532 }
2533
Emil Tantilov6af3b9e2010-09-29 21:35:23 +00002534 /*
2535	 * re-enable link (maybe) and non-queue interrupts, no flush;
2536 * ixgbe_poll will re-enable the queue interrupts
2537 */
2538
2539 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2540 ixgbe_irq_enable(adapter, false, false);
2541
Auke Kok9a799d72007-09-15 14:07:45 -07002542 return IRQ_HANDLED;
2543}
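/*
 * Summary of the shared-IRQ handling above (a sketch of the intended
 * behaviour, not normative): EICR reads as 0 when the line fired for
 * another device sharing the interrupt.  Since EIAM auto-masked our
 * vectors on that very read, the handler must re-enable interrupts
 * before returning IRQ_NONE, or the port would go silent.
 */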
2544
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002545static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
2546{
2547 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2548
2549 for (i = 0; i < q_vectors; i++) {
Alexander Duyck7a921c92009-05-06 10:43:28 +00002550 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002551 bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
2552 bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
2553 q_vector->rxr_count = 0;
2554 q_vector->txr_count = 0;
2555 }
2556}
2557
Auke Kok9a799d72007-09-15 14:07:45 -07002558/**
2559 * ixgbe_request_irq - initialize interrupts
2560 * @adapter: board private structure
2561 *
2562 * Attempts to configure interrupts using the best available
2563 * capabilities of the hardware and kernel.
2564 **/
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002565static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
Auke Kok9a799d72007-09-15 14:07:45 -07002566{
2567 struct net_device *netdev = adapter->netdev;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002568 int err;
Auke Kok9a799d72007-09-15 14:07:45 -07002569
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002570 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2571 err = ixgbe_request_msix_irqs(adapter);
2572 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
Joe Perchesa0607fd2009-11-18 23:29:17 -08002573 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
Joe Perchese8e9f692010-09-07 21:34:53 +00002574 netdev->name, netdev);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002575 } else {
Joe Perchesa0607fd2009-11-18 23:29:17 -08002576 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
Joe Perchese8e9f692010-09-07 21:34:53 +00002577 netdev->name, netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07002578 }
2579
Auke Kok9a799d72007-09-15 14:07:45 -07002580 if (err)
Emil Tantilov396e7992010-07-01 20:05:12 +00002581 e_err(probe, "request_irq failed, Error %d\n", err);
Auke Kok9a799d72007-09-15 14:07:45 -07002582
Auke Kok9a799d72007-09-15 14:07:45 -07002583 return err;
2584}
2585
2586static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2587{
2588 struct net_device *netdev = adapter->netdev;
2589
2590 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002591 int i, q_vectors;
Auke Kok9a799d72007-09-15 14:07:45 -07002592
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002593 q_vectors = adapter->num_msix_vectors;
2594
2595 i = q_vectors - 1;
Auke Kok9a799d72007-09-15 14:07:45 -07002596 free_irq(adapter->msix_entries[i].vector, netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07002597
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002598 i--;
2599 for (; i >= 0; i--) {
2600 free_irq(adapter->msix_entries[i].vector,
Joe Perchese8e9f692010-09-07 21:34:53 +00002601 adapter->q_vector[i]);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002602 }
2603
2604 ixgbe_reset_q_vectors(adapter);
2605 } else {
2606 free_irq(adapter->pdev->irq, netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07002607 }
2608}
2609
2610/**
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00002611 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
2612 * @adapter: board private structure
2613 **/
2614static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
2615{
Alexander Duyckbd508172010-11-16 19:27:03 -08002616 switch (adapter->hw.mac.type) {
2617 case ixgbe_mac_82598EB:
Nelson, Shannon835462f2009-04-27 22:42:54 +00002618 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
Alexander Duyckbd508172010-11-16 19:27:03 -08002619 break;
2620 case ixgbe_mac_82599EB:
Don Skidmoreb93a2222010-11-16 19:27:17 -08002621 case ixgbe_mac_X540:
Nelson, Shannon835462f2009-04-27 22:42:54 +00002622 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
2623 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00002624 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
Greg Rose1cdd1ec2010-01-09 02:26:46 +00002625 if (adapter->num_vfs > 32)
2626 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
Alexander Duyckbd508172010-11-16 19:27:03 -08002627 break;
2628 default:
2629 break;
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00002630 }
2631 IXGBE_WRITE_FLUSH(&adapter->hw);
2632 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2633 int i;
2634 for (i = 0; i < adapter->num_msix_vectors; i++)
2635 synchronize_irq(adapter->msix_entries[i].vector);
2636 } else {
2637 synchronize_irq(adapter->pdev->irq);
2638 }
2639}
2640
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00002641/**
Auke Kok9a799d72007-09-15 14:07:45 -07002642 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
2643 *
2644 **/
2645static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
2646{
Auke Kok9a799d72007-09-15 14:07:45 -07002647 struct ixgbe_hw *hw = &adapter->hw;
2648
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002649 IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
Joe Perchese8e9f692010-09-07 21:34:53 +00002650 EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
Auke Kok9a799d72007-09-15 14:07:45 -07002651
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002652 ixgbe_set_ivar(adapter, 0, 0, 0);
2653 ixgbe_set_ivar(adapter, 1, 0, 0);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002654
2655 map_vector_to_rxq(adapter, 0, 0);
2656 map_vector_to_txq(adapter, 0, 0);
2657
Emil Tantilov396e7992010-07-01 20:05:12 +00002658 e_info(hw, "Legacy interrupt IVAR setup done\n");
Auke Kok9a799d72007-09-15 14:07:45 -07002659}
2660
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002661/**
2662 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
2663 * @adapter: board private structure
2664 * @ring: structure containing ring specific data
2665 *
2666 * Configure the Tx descriptor ring after a reset.
2667 **/
Alexander Duyck84418e32010-08-19 13:40:54 +00002668void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2669 struct ixgbe_ring *ring)
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002670{
2671 struct ixgbe_hw *hw = &adapter->hw;
2672 u64 tdba = ring->dma;
Alexander Duyck2f1860b2010-08-19 13:39:43 +00002673 int wait_loop = 10;
2674 u32 txdctl;
Alexander Duyckbf29ee62010-11-16 19:27:07 -08002675 u8 reg_idx = ring->reg_idx;
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002676
Alexander Duyck2f1860b2010-08-19 13:39:43 +00002677 /* disable queue to avoid issues while updating state */
2678 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
2679 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
2680 txdctl & ~IXGBE_TXDCTL_ENABLE);
2681 IXGBE_WRITE_FLUSH(hw);
2682
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002683 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
Joe Perchese8e9f692010-09-07 21:34:53 +00002684 (tdba & DMA_BIT_MASK(32)));
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002685 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
2686 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
2687 ring->count * sizeof(union ixgbe_adv_tx_desc));
2688 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
2689 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
Alexander Duyck84ea2592010-11-16 19:26:49 -08002690 ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002691
Alexander Duyck2f1860b2010-08-19 13:39:43 +00002692 /* configure fetching thresholds */
2693 if (adapter->rx_itr_setting == 0) {
2694 /* cannot set wthresh when itr==0 */
2695 txdctl &= ~0x007F0000;
2696 } else {
2697 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
2698 txdctl |= (8 << 16);
2699 }
2700 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
2701 /* PThresh workaround for Tx hang with DFP enabled. */
2702 txdctl |= 32;
2703 }
2704
2705 /* reinitialize flowdirector state */
Alexander Duyckee9e0f02010-11-16 19:27:01 -08002706 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
2707 adapter->atr_sample_rate) {
2708 ring->atr_sample_rate = adapter->atr_sample_rate;
2709 ring->atr_count = 0;
2710 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
2711 } else {
2712 ring->atr_sample_rate = 0;
2713 }
Alexander Duyck2f1860b2010-08-19 13:39:43 +00002714
John Fastabendc84d3242010-11-16 19:27:12 -08002715 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
2716
Alexander Duyck2f1860b2010-08-19 13:39:43 +00002717 /* enable queue */
2718 txdctl |= IXGBE_TXDCTL_ENABLE;
2719 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
2720
2721 /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
2722 if (hw->mac.type == ixgbe_mac_82598EB &&
2723 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
2724 return;
2725
2726 /* poll to verify queue is enabled */
2727 do {
2728 msleep(1);
2729 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
2730 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
2731 if (!wait_loop)
2732 e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002733}
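/*
 * TXDCTL layout assumed by the threshold code above (as described in
 * the 82599 datasheet; treat this as a sketch): bits 6:0 hold PTHRESH,
 * bits 14:8 HTHRESH and bits 22:16 WTHRESH.  Hence
 * "txdctl &= ~0x007F0000" clears WTHRESH, "(8 << 16)" sets WTHRESH=8
 * and "txdctl |= 32" sets PTHRESH=32.
 */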
2734
Alexander Duyck120ff942010-08-19 13:34:50 +00002735static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
2736{
2737 struct ixgbe_hw *hw = &adapter->hw;
2738 u32 rttdcs;
2739 u32 mask;
2740
2741 if (hw->mac.type == ixgbe_mac_82598EB)
2742 return;
2743
2744 /* disable the arbiter while setting MTQC */
2745 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2746 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2747 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2748
2749 /* set transmit pool layout */
2750 mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
2751 switch (adapter->flags & mask) {
2752
2753 case (IXGBE_FLAG_SRIOV_ENABLED):
2754 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2755 (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
2756 break;
2757
2758 case (IXGBE_FLAG_DCB_ENABLED):
2759 /* We enable 8 traffic classes, DCB only */
2760 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2761 (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
2762 break;
2763
2764 default:
2765 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2766 break;
2767 }
2768
2769 /* re-enable the arbiter */
2770 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2771 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2772}
2773
Auke Kok9a799d72007-09-15 14:07:45 -07002774/**
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002775 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
Auke Kok9a799d72007-09-15 14:07:45 -07002776 * @adapter: board private structure
2777 *
2778 * Configure the Tx unit of the MAC after a reset.
2779 **/
2780static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
2781{
Alexander Duyck2f1860b2010-08-19 13:39:43 +00002782 struct ixgbe_hw *hw = &adapter->hw;
2783 u32 dmatxctl;
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002784 u32 i;
Auke Kok9a799d72007-09-15 14:07:45 -07002785
Alexander Duyck2f1860b2010-08-19 13:39:43 +00002786 ixgbe_setup_mtqc(adapter);
2787
2788 if (hw->mac.type != ixgbe_mac_82598EB) {
2789 /* DMATXCTL.EN must be before Tx queues are enabled */
2790 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2791 dmatxctl |= IXGBE_DMATXCTL_TE;
2792 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2793 }
2794
Auke Kok9a799d72007-09-15 14:07:45 -07002795 /* Setup the HW Tx Head and Tail descriptor pointers */
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002796 for (i = 0; i < adapter->num_tx_queues; i++)
2797 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
Auke Kok9a799d72007-09-15 14:07:45 -07002798}
2799
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002800#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
Auke Kok9a799d72007-09-15 14:07:45 -07002801
Yi Zoua6616b42009-08-06 13:05:23 +00002802static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
Joe Perchese8e9f692010-09-07 21:34:53 +00002803 struct ixgbe_ring *rx_ring)
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002804{
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002805 u32 srrctl;
Alexander Duyckbf29ee62010-11-16 19:27:07 -08002806 u8 reg_idx = rx_ring->reg_idx;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002807
Alexander Duyckbd508172010-11-16 19:27:03 -08002808 switch (adapter->hw.mac.type) {
2809 case ixgbe_mac_82598EB: {
2810 struct ixgbe_ring_feature *feature = adapter->ring_feature;
2811 const int mask = feature[RING_F_RSS].mask;
Alexander Duyckbf29ee62010-11-16 19:27:07 -08002812 reg_idx = reg_idx & mask;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002813 }
Alexander Duyckbd508172010-11-16 19:27:03 -08002814 break;
2815 case ixgbe_mac_82599EB:
Don Skidmoreb93a2222010-11-16 19:27:17 -08002816 case ixgbe_mac_X540:
Alexander Duyckbd508172010-11-16 19:27:03 -08002817 default:
2818 break;
2819 }
2820
Alexander Duyckbf29ee62010-11-16 19:27:07 -08002821 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002822
2823 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2824 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
Alexander Duyck9e10e042010-08-19 13:40:06 +00002825 if (adapter->num_vfs)
2826 srrctl |= IXGBE_SRRCTL_DROP_EN;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002827
Alexander Duyckafafd5b2009-05-07 10:38:56 +00002828 srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
2829 IXGBE_SRRCTL_BSIZEHDR_MASK;
2830
Alexander Duyck7d637bc2010-11-16 19:26:56 -08002831 if (ring_is_ps_enabled(rx_ring)) {
Alexander Duyckafafd5b2009-05-07 10:38:56 +00002832#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
2833 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2834#else
2835 srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2836#endif
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002837 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002838 } else {
Alexander Duyckafafd5b2009-05-07 10:38:56 +00002839 srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
2840 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002841 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002842 }
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002843
Alexander Duyckbf29ee62010-11-16 19:27:07 -08002844 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002845}
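/*
 * BSIZEPKT example for the function above (hypothetical value): the
 * field is expressed in 1 KB units via IXGBE_SRRCTL_BSIZEPKT_SHIFT
 * (a shift of 10), so a one-buffer ring with rx_buf_len == 2048 yields
 * ALIGN(2048, 1024) >> 10 == 2, i.e. a 2 KB packet buffer per
 * descriptor.
 */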
2846
Alexander Duyck05abb122010-08-19 13:35:41 +00002847static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00002848{
Alexander Duyck05abb122010-08-19 13:35:41 +00002849 struct ixgbe_hw *hw = &adapter->hw;
2850 static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
Joe Perchese8e9f692010-09-07 21:34:53 +00002851 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
2852 0x6A3E67EA, 0x14364D17, 0x3BED200D};
Alexander Duyck05abb122010-08-19 13:35:41 +00002853 u32 mrqc = 0, reta = 0;
2854 u32 rxcsum;
2855 int i, j;
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00002856 int mask;
2857
Alexander Duyck05abb122010-08-19 13:35:41 +00002858 /* Fill out hash function seeds */
2859 for (i = 0; i < 10; i++)
2860 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00002861
Alexander Duyck05abb122010-08-19 13:35:41 +00002862 /* Fill out redirection table */
2863 for (i = 0, j = 0; i < 128; i++, j++) {
2864 if (j == adapter->ring_feature[RING_F_RSS].indices)
2865 j = 0;
2866		/* reta = 4-byte sliding window of the queue indices
2867		 * 0x00..(indices-1), repeating, one byte per entry */
2868 reta = (reta << 8) | (j * 0x11);
2869 if ((i & 3) == 3)
2870 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2871 }
2872
2873 /* Disable indicating checksum in descriptor, enables RSS hash */
2874 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2875 rxcsum |= IXGBE_RXCSUM_PCSD;
2876 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2877
2878 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2879 mask = adapter->flags & IXGBE_FLAG_RSS_ENABLED;
2880 else
2881 mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00002882#ifdef CONFIG_IXGBE_DCB
Alexander Duyck05abb122010-08-19 13:35:41 +00002883 | IXGBE_FLAG_DCB_ENABLED
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00002884#endif
Alexander Duyck05abb122010-08-19 13:35:41 +00002885 | IXGBE_FLAG_SRIOV_ENABLED
2886 );
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00002887
2888 switch (mask) {
2889 case (IXGBE_FLAG_RSS_ENABLED):
2890 mrqc = IXGBE_MRQC_RSSEN;
2891 break;
Greg Rose1cdd1ec2010-01-09 02:26:46 +00002892 case (IXGBE_FLAG_SRIOV_ENABLED):
2893 mrqc = IXGBE_MRQC_VMDQEN;
2894 break;
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00002895#ifdef CONFIG_IXGBE_DCB
2896 case (IXGBE_FLAG_DCB_ENABLED):
2897 mrqc = IXGBE_MRQC_RT8TCEN;
2898 break;
2899#endif /* CONFIG_IXGBE_DCB */
2900 default:
2901 break;
2902 }
2903
Alexander Duyck05abb122010-08-19 13:35:41 +00002904 /* Perform hash on these packet types */
2905 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2906 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2907 | IXGBE_MRQC_RSS_FIELD_IPV6
2908 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2909
2910 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00002911}
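/*
 * RETA fill example (ring count is hypothetical): with
 * ring_feature[RING_F_RSS].indices == 4, j cycles 0,1,2,3 and each
 * byte shifted in is j * 0x11, so the first 32-bit RETA register is
 * written as 0x00112233 and the pattern repeats across all 128
 * entries.  Multiplying by 0x11 mirrors the queue index into both
 * nibbles of the byte.
 */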
2912
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07002913/**
Don Skidmoreb93a2222010-11-16 19:27:17 -08002914 * ixgbe_clear_rscctl - disable RSC for the indicated ring
2915 * @adapter: address of board private structure
2916 * @ring: structure containing ring specific data
2917 **/
2918void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
2919 struct ixgbe_ring *ring)
2920{
2921 struct ixgbe_hw *hw = &adapter->hw;
2922 u32 rscctrl;
2923 u8 reg_idx = ring->reg_idx;
2924
2925 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
2926 rscctrl &= ~IXGBE_RSCCTL_RSCEN;
2927 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
2928}
2929
2930/**
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00002931 * ixgbe_configure_rscctl - enable RSC for the indicated ring
2932 * @adapter: address of board private structure
2933	 * @ring: structure containing ring specific data
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00002934 **/
Don Skidmoreb93a2222010-11-16 19:27:17 -08002935void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
Alexander Duyck73670962010-08-19 13:38:34 +00002936 struct ixgbe_ring *ring)
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00002937{
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00002938 struct ixgbe_hw *hw = &adapter->hw;
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00002939 u32 rscctrl;
Mallikarjuna R Chilakalaedd2ea552009-11-23 10:45:11 -08002940 int rx_buf_len;
Alexander Duyckbf29ee62010-11-16 19:27:07 -08002941 u8 reg_idx = ring->reg_idx;
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00002942
Alexander Duyck7d637bc2010-11-16 19:26:56 -08002943 if (!ring_is_rsc_enabled(ring))
Alexander Duyck73670962010-08-19 13:38:34 +00002944 return;
2945
2946 rx_buf_len = ring->rx_buf_len;
2947 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00002948 rscctrl |= IXGBE_RSCCTL_RSCEN;
2949 /*
2950 * we must limit the number of descriptors so that the
2951 * total size of max desc * buf_len is not greater
2952 * than 65535
2953 */
Alexander Duyck7d637bc2010-11-16 19:26:56 -08002954 if (ring_is_ps_enabled(ring)) {
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00002955#if (MAX_SKB_FRAGS > 16)
2956 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2957#elif (MAX_SKB_FRAGS > 8)
2958 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2959#elif (MAX_SKB_FRAGS > 4)
2960 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2961#else
2962 rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
2963#endif
2964 } else {
2965 if (rx_buf_len < IXGBE_RXBUFFER_4096)
2966 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2967 else if (rx_buf_len < IXGBE_RXBUFFER_8192)
2968 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2969 else
2970 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2971 }
Alexander Duyck73670962010-08-19 13:38:34 +00002972 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00002973}
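/*
 * Sanity check on the 65535-byte RSC limit above (illustrative): a
 * 4096-byte buffer falls in the "< IXGBE_RXBUFFER_8192" class, so
 * MAXDESC_8 is selected and the worst-case aggregation is
 * 8 * 4096 = 32768 bytes, safely below the 65535-byte cap.
 */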
2974
Alexander Duyck9e10e042010-08-19 13:40:06 +00002975/**
2976 * ixgbe_set_uta - Set unicast filter table address
2977 * @adapter: board private structure
2978 *
2979 * The unicast table address is a register array of 32-bit registers.
2980 * The table is meant to be used in a way similar to how the MTA is used
2981	 * however, due to certain limitations in the hardware, it is necessary to
2982 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
2983 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
2984 **/
2985static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
2986{
2987 struct ixgbe_hw *hw = &adapter->hw;
2988 int i;
2989
2990 /* The UTA table only exists on 82599 hardware and newer */
2991 if (hw->mac.type < ixgbe_mac_82599EB)
2992 return;
2993
2994 /* we only need to do this if VMDq is enabled */
2995 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2996 return;
2997
2998 for (i = 0; i < 128; i++)
2999 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
3000}
3001
3002#define IXGBE_MAX_RX_DESC_POLL 10
3003static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
3004 struct ixgbe_ring *ring)
3005{
3006 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck9e10e042010-08-19 13:40:06 +00003007 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3008 u32 rxdctl;
Alexander Duyckbf29ee62010-11-16 19:27:07 -08003009 u8 reg_idx = ring->reg_idx;
Alexander Duyck9e10e042010-08-19 13:40:06 +00003010
3011 /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
3012 if (hw->mac.type == ixgbe_mac_82598EB &&
3013 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3014 return;
3015
3016 do {
3017 msleep(1);
3018 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3019 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
3020
3021 if (!wait_loop) {
3022 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
3023 "the polling period\n", reg_idx);
3024 }
3025}
3026
Alexander Duyck84418e32010-08-19 13:40:54 +00003027void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
3028 struct ixgbe_ring *ring)
Alexander Duyckacd37172010-08-19 13:36:05 +00003029{
3030 struct ixgbe_hw *hw = &adapter->hw;
3031 u64 rdba = ring->dma;
Alexander Duyck9e10e042010-08-19 13:40:06 +00003032 u32 rxdctl;
Alexander Duyckbf29ee62010-11-16 19:27:07 -08003033 u8 reg_idx = ring->reg_idx;
Alexander Duyckacd37172010-08-19 13:36:05 +00003034
Alexander Duyck9e10e042010-08-19 13:40:06 +00003035 /* disable queue to avoid issues while updating state */
3036 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3037 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx),
3038 rxdctl & ~IXGBE_RXDCTL_ENABLE);
3039 IXGBE_WRITE_FLUSH(hw);
3040
Alexander Duyckacd37172010-08-19 13:36:05 +00003041 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
3042 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
3043 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
3044 ring->count * sizeof(union ixgbe_adv_rx_desc));
3045 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
3046 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
Alexander Duyck84ea2592010-11-16 19:26:49 -08003047 ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
Alexander Duyck9e10e042010-08-19 13:40:06 +00003048
3049 ixgbe_configure_srrctl(adapter, ring);
3050 ixgbe_configure_rscctl(adapter, ring);
3051
3052 if (hw->mac.type == ixgbe_mac_82598EB) {
3053 /*
3054 * enable cache line friendly hardware writes:
3055		 * PTHRESH=32 descriptors (half the internal cache);
3056		 * this also removes the ugly rx_no_buffer_count increment.
3057		 * HTHRESH=4 descriptors (to minimize latency on fetch),
3058		 * WTHRESH=8 (burst writeback of up to two cache lines)
3059 */
3060 rxdctl &= ~0x3FFFFF;
3061 rxdctl |= 0x080420;
3062 }
3063
3064 /* enable receive descriptor ring */
3065 rxdctl |= IXGBE_RXDCTL_ENABLE;
3066 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3067
3068 ixgbe_rx_desc_queue_enable(adapter, ring);
Alexander Duyckfc77dc32010-11-16 19:26:51 -08003069 ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
Alexander Duyckacd37172010-08-19 13:36:05 +00003070}
3071
Alexander Duyck48654522010-08-19 13:36:27 +00003072static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
3073{
3074 struct ixgbe_hw *hw = &adapter->hw;
3075 int p;
3076
3077 /* PSRTYPE must be initialized in non 82598 adapters */
3078 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
Joe Perchese8e9f692010-09-07 21:34:53 +00003079 IXGBE_PSRTYPE_UDPHDR |
3080 IXGBE_PSRTYPE_IPV4HDR |
Alexander Duyck48654522010-08-19 13:36:27 +00003081 IXGBE_PSRTYPE_L2HDR |
Joe Perchese8e9f692010-09-07 21:34:53 +00003082 IXGBE_PSRTYPE_IPV6HDR;
Alexander Duyck48654522010-08-19 13:36:27 +00003083
3084 if (hw->mac.type == ixgbe_mac_82598EB)
3085 return;
3086
3087 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
3088 psrtype |= (adapter->num_rx_queues_per_pool << 29);
3089
3090 for (p = 0; p < adapter->num_rx_pools; p++)
3091 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
3092 psrtype);
3093}
3094
Alexander Duyckf5b4a522010-08-19 13:38:57 +00003095static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
3096{
3097 struct ixgbe_hw *hw = &adapter->hw;
3098 u32 gcr_ext;
3099 u32 vt_reg_bits;
3100 u32 reg_offset, vf_shift;
3101 u32 vmdctl;
3102
3103 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3104 return;
3105
3106 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3107 vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN;
3108 vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
3109 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
3110
3111 vf_shift = adapter->num_vfs % 32;
3112 reg_offset = (adapter->num_vfs > 32) ? 1 : 0;
3113
3114 /* Enable only the PF's pool for Tx/Rx */
3115 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
3116 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0);
3117 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
3118 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0);
3119 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3120
3121 /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
3122 hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
3123
3124 /*
3125 * Set up VF register offsets for selected VT Mode,
3126 * i.e. 32 or 64 VFs for SR-IOV
3127 */
3128 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
3129 gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
3130 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
3131 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
3132
3133 /* enable Tx loopback for VF/PF communication */
3134 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3135}
3136
Alexander Duyck477de6e2010-08-19 13:38:11 +00003137static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
Auke Kok9a799d72007-09-15 14:07:45 -07003138{
Auke Kok9a799d72007-09-15 14:07:45 -07003139 struct ixgbe_hw *hw = &adapter->hw;
3140 struct net_device *netdev = adapter->netdev;
3141 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07003142 int rx_buf_len;
Alexander Duyck477de6e2010-08-19 13:38:11 +00003143 struct ixgbe_ring *rx_ring;
3144 int i;
3145 u32 mhadd, hlreg0;
Alexander Duyck48654522010-08-19 13:36:27 +00003146
Auke Kok9a799d72007-09-15 14:07:45 -07003147 /* Decide whether to use packet split mode or not */
Greg Rose1cdd1ec2010-01-09 02:26:46 +00003148 /* Do not use packet split if we're in SR-IOV Mode */
3149 if (!adapter->num_vfs)
3150 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
Auke Kok9a799d72007-09-15 14:07:45 -07003151
3152 /* Set the RX buffer length according to the mode */
3153 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07003154 rx_buf_len = IXGBE_RX_HDR_SIZE;
Auke Kok9a799d72007-09-15 14:07:45 -07003155 } else {
Peter P Waskiewicz Jr0c19d6a2009-07-30 12:25:28 +00003156 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
Alexander Duyckf8212f92009-04-27 22:42:37 +00003157 (netdev->mtu <= ETH_DATA_LEN))
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07003158 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
Auke Kok9a799d72007-09-15 14:07:45 -07003159 else
Alexander Duyck477de6e2010-08-19 13:38:11 +00003160 rx_buf_len = ALIGN(max_frame + VLAN_HLEN, 1024);
3161 }
3162
3163#ifdef IXGBE_FCOE
3164 /* adjust max frame to be able to do baby jumbo for FCoE */
3165 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
3166 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
3167 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
3168
3169#endif /* IXGBE_FCOE */
3170 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3171 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
3172 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3173 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
3174
3175 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
Auke Kok9a799d72007-09-15 14:07:45 -07003176 }
3177
Auke Kok9a799d72007-09-15 14:07:45 -07003178 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
Alexander Duyck477de6e2010-08-19 13:38:11 +00003179 /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
3180 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
Auke Kok9a799d72007-09-15 14:07:45 -07003181 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3182
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00003183 /*
3184 * Setup the HW Rx Head and Tail Descriptor Pointers and
3185 * the Base and Length of the Rx Descriptor Ring
3186 */
Auke Kok9a799d72007-09-15 14:07:45 -07003187 for (i = 0; i < adapter->num_rx_queues; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00003188 rx_ring = adapter->rx_ring[i];
Yi Zoua6616b42009-08-06 13:05:23 +00003189 rx_ring->rx_buf_len = rx_buf_len;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07003190
Yi Zou6e455b892009-08-06 13:05:44 +00003191 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
Alexander Duyck7d637bc2010-11-16 19:26:56 -08003192 set_ring_ps_enabled(rx_ring);
Peter P Waskiewicz Jr1b3ff022009-09-14 07:47:27 +00003193 else
Alexander Duyck7d637bc2010-11-16 19:26:56 -08003194 clear_ring_ps_enabled(rx_ring);
3195
3196 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
3197 set_ring_rsc_enabled(rx_ring);
3198 else
3199 clear_ring_rsc_enabled(rx_ring);
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07003200
Yi Zou63f39bd2009-05-17 12:34:35 +00003201#ifdef IXGBE_FCOE
Joe Perchese8e9f692010-09-07 21:34:53 +00003202 if (netdev->features & NETIF_F_FCOE_MTU) {
Yi Zou63f39bd2009-05-17 12:34:35 +00003203 struct ixgbe_ring_feature *f;
3204 f = &adapter->ring_feature[RING_F_FCOE];
Yi Zou6e455b892009-08-06 13:05:44 +00003205 if ((i >= f->mask) && (i < f->mask + f->indices)) {
Alexander Duyck7d637bc2010-11-16 19:26:56 -08003206 clear_ring_ps_enabled(rx_ring);
Yi Zou6e455b892009-08-06 13:05:44 +00003207 if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
3208 rx_ring->rx_buf_len =
Joe Perchese8e9f692010-09-07 21:34:53 +00003209 IXGBE_FCOE_JUMBO_FRAME_SIZE;
Alexander Duyck7d637bc2010-11-16 19:26:56 -08003210 } else if (!ring_is_rsc_enabled(rx_ring) &&
3211 !ring_is_ps_enabled(rx_ring)) {
3212 rx_ring->rx_buf_len =
3213 IXGBE_FCOE_JUMBO_FRAME_SIZE;
Yi Zou6e455b892009-08-06 13:05:44 +00003214 }
Yi Zou63f39bd2009-05-17 12:34:35 +00003215 }
Yi Zou63f39bd2009-05-17 12:34:35 +00003216#endif /* IXGBE_FCOE */
Alexander Duyck477de6e2010-08-19 13:38:11 +00003217 }
Alexander Duyck477de6e2010-08-19 13:38:11 +00003218}
3219
Alexander Duyck73670962010-08-19 13:38:34 +00003220static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
3221{
3222 struct ixgbe_hw *hw = &adapter->hw;
3223 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3224
3225 switch (hw->mac.type) {
3226 case ixgbe_mac_82598EB:
3227 /*
3228 * For VMDq support of different descriptor types or
3229 * buffer sizes through the use of multiple SRRCTL
3230		 * registers, RDRXCTL.MVMEN must be set to 1.
3231		 *
3232		 * Also, the manual doesn't mention it clearly but DCA hints
3233 * will only use queue 0's tags unless this bit is set. Side
3234 * effects of setting this bit are only that SRRCTL must be
3235 * fully programmed [0..15]
3236 */
3237 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
3238 break;
3239 case ixgbe_mac_82599EB:
Don Skidmoreb93a2222010-11-16 19:27:17 -08003240 case ixgbe_mac_X540:
Alexander Duyck73670962010-08-19 13:38:34 +00003241 /* Disable RSC for ACK packets */
3242 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
3243 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
3244 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3245 /* hardware requires some bits to be set by default */
3246 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
3247 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
3248 break;
3249 default:
3250 /* We should do nothing since we don't know this hardware */
3251 return;
3252 }
3253
3254 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
3255}
3256
Alexander Duyck477de6e2010-08-19 13:38:11 +00003257/**
3258 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
3259 * @adapter: board private structure
3260 *
3261 * Configure the Rx unit of the MAC after a reset.
3262 **/
3263static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
3264{
3265 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck477de6e2010-08-19 13:38:11 +00003266 int i;
3267 u32 rxctrl;
Alexander Duyck477de6e2010-08-19 13:38:11 +00003268
3269 /* disable receives while setting up the descriptors */
3270 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3271 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
3272
3273 ixgbe_setup_psrtype(adapter);
Alexander Duyck73670962010-08-19 13:38:34 +00003274 ixgbe_setup_rdrxctl(adapter);
Alexander Duyck477de6e2010-08-19 13:38:11 +00003275
Alexander Duyck9e10e042010-08-19 13:40:06 +00003276 /* Program registers for the distribution of queues */
Alexander Duyckf5b4a522010-08-19 13:38:57 +00003277 ixgbe_setup_mrqc(adapter);
Alexander Duyckf5b4a522010-08-19 13:38:57 +00003278
Alexander Duyck9e10e042010-08-19 13:40:06 +00003279 ixgbe_set_uta(adapter);
3280
Alexander Duyck477de6e2010-08-19 13:38:11 +00003281 /* set_rx_buffer_len must be called before ring initialization */
3282 ixgbe_set_rx_buffer_len(adapter);
3283
3284 /*
3285 * Setup the HW Rx Head and Tail Descriptor Pointers and
3286 * the Base and Length of the Rx Descriptor Ring
3287 */
Alexander Duyck9e10e042010-08-19 13:40:06 +00003288 for (i = 0; i < adapter->num_rx_queues; i++)
3289 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07003290
Alexander Duyck9e10e042010-08-19 13:40:06 +00003291 /* disable drop enable for 82598 parts */
3292 if (hw->mac.type == ixgbe_mac_82598EB)
3293 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3294
3295 /* enable all receives */
3296 rxctrl |= IXGBE_RXCTRL_RXEN;
3297 hw->mac.ops.enable_rx_dma(hw, rxctrl);
Auke Kok9a799d72007-09-15 14:07:45 -07003298}
3299
Auke Kok9a799d72007-09-15 14:07:45 -07003300static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
3301{
3302 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07003303 struct ixgbe_hw *hw = &adapter->hw;
Greg Rose1ada1b12010-01-22 22:45:43 +00003304 int pool_ndx = adapter->num_vfs;
Auke Kok9a799d72007-09-15 14:07:45 -07003305
3306 /* add VID to filter table */
Greg Rose1ada1b12010-01-22 22:45:43 +00003307 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
Jesse Grossf62bbb52010-10-20 13:56:10 +00003308 set_bit(vid, adapter->active_vlans);
Auke Kok9a799d72007-09-15 14:07:45 -07003309}
3310
3311static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
3312{
3313 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07003314 struct ixgbe_hw *hw = &adapter->hw;
Greg Rose1ada1b12010-01-22 22:45:43 +00003315 int pool_ndx = adapter->num_vfs;
Auke Kok9a799d72007-09-15 14:07:45 -07003316
Auke Kok9a799d72007-09-15 14:07:45 -07003317 /* remove VID from filter table */
Greg Rose1ada1b12010-01-22 22:45:43 +00003318 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
Jesse Grossf62bbb52010-10-20 13:56:10 +00003319 clear_bit(vid, adapter->active_vlans);
Auke Kok9a799d72007-09-15 14:07:45 -07003320}
3321
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003322/**
3323 * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
3324 * @adapter: driver data
3325 */
3326static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
3327{
3328 struct ixgbe_hw *hw = &adapter->hw;
Jesse Grossf62bbb52010-10-20 13:56:10 +00003329 u32 vlnctrl;
3330
3331 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3332 vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
3333 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3334}
3335
3336/**
3337 * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
3338 * @adapter: driver data
3339 */
3340static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
3341{
3342 struct ixgbe_hw *hw = &adapter->hw;
3343 u32 vlnctrl;
3344
3345 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3346 vlnctrl |= IXGBE_VLNCTRL_VFE;
3347 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
3348 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3349}
3350
3351/**
3352 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
3353 * @adapter: driver data
3354 */
3355static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
3356{
3357 struct ixgbe_hw *hw = &adapter->hw;
3358 u32 vlnctrl;
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003359 int i, j;
3360
3361 switch (hw->mac.type) {
3362 case ixgbe_mac_82598EB:
Jesse Grossf62bbb52010-10-20 13:56:10 +00003363 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3364 vlnctrl &= ~IXGBE_VLNCTRL_VME;
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003365 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3366 break;
3367 case ixgbe_mac_82599EB:
Don Skidmoreb93a2222010-11-16 19:27:17 -08003368 case ixgbe_mac_X540:
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003369 for (i = 0; i < adapter->num_rx_queues; i++) {
3370 j = adapter->rx_ring[i]->reg_idx;
3371 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3372 vlnctrl &= ~IXGBE_RXDCTL_VME;
3373 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3374 }
3375 break;
3376 default:
3377 break;
3378 }
3379}
3380
3381/**
Jesse Grossf62bbb52010-10-20 13:56:10 +00003382 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003383 * @adapter: driver data
3384 */
Jesse Grossf62bbb52010-10-20 13:56:10 +00003385static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003386{
3387 struct ixgbe_hw *hw = &adapter->hw;
Jesse Grossf62bbb52010-10-20 13:56:10 +00003388 u32 vlnctrl;
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003389 int i, j;
3390
3391 switch (hw->mac.type) {
3392 case ixgbe_mac_82598EB:
Jesse Grossf62bbb52010-10-20 13:56:10 +00003393 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3394 vlnctrl |= IXGBE_VLNCTRL_VME;
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003395 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3396 break;
3397 case ixgbe_mac_82599EB:
Don Skidmoreb93a2222010-11-16 19:27:17 -08003398 case ixgbe_mac_X540:
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003399 for (i = 0; i < adapter->num_rx_queues; i++) {
3400 j = adapter->rx_ring[i]->reg_idx;
3401 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3402 vlnctrl |= IXGBE_RXDCTL_VME;
3403 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3404 }
3405 break;
3406 default:
3407 break;
3408 }
3409}
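/*
 * Design note on the two strip helpers above (inferred from the switch
 * arms): 82598 toggles VLAN stripping globally through VLNCTRL.VME,
 * whereas 82599/X540 moved the VME bit into each ring's RXDCTL, which
 * is why the newer MACs loop over every Rx ring in both the enable and
 * disable paths.
 */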
3410
Auke Kok9a799d72007-09-15 14:07:45 -07003411static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
3412{
Jesse Grossf62bbb52010-10-20 13:56:10 +00003413 u16 vid;
Auke Kok9a799d72007-09-15 14:07:45 -07003414
Jesse Grossf62bbb52010-10-20 13:56:10 +00003415 ixgbe_vlan_rx_add_vid(adapter->netdev, 0);
3416
3417 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
3418 ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
Auke Kok9a799d72007-09-15 14:07:45 -07003419}
3420
3421/**
Alexander Duyck28500622010-06-15 09:25:48 +00003422 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
3423 * @netdev: network interface device structure
3424 *
3425 * Writes unicast address list to the RAR table.
3426 * Returns: -ENOMEM on failure/insufficient address space
3427 * 0 on no addresses written
3428 * X on writing X addresses to the RAR table
3429 **/
3430static int ixgbe_write_uc_addr_list(struct net_device *netdev)
3431{
3432 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3433 struct ixgbe_hw *hw = &adapter->hw;
3434 unsigned int vfn = adapter->num_vfs;
3435 unsigned int rar_entries = hw->mac.num_rar_entries - (vfn + 1);
3436 int count = 0;
3437
3438 /* return ENOMEM indicating insufficient memory for addresses */
3439 if (netdev_uc_count(netdev) > rar_entries)
3440 return -ENOMEM;
3441
3442 if (!netdev_uc_empty(netdev) && rar_entries) {
3443 struct netdev_hw_addr *ha;
3444 /* return error if we do not support writing to RAR table */
3445 if (!hw->mac.ops.set_rar)
3446 return -ENOMEM;
3447
3448 netdev_for_each_uc_addr(ha, netdev) {
3449 if (!rar_entries)
3450 break;
3451 hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
3452 vfn, IXGBE_RAH_AV);
3453 count++;
3454 }
3455 }
3456 /* write the addresses in reverse order to avoid write combining */
3457 for (; rar_entries > 0 ; rar_entries--)
3458 hw->mac.ops.clear_rar(hw, rar_entries);
3459
3460 return count;
3461}
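/*
 * Capacity example for the accounting above (numbers hypothetical):
 * with hw->mac.num_rar_entries == 128 and 7 VFs, vfn == 7 leaves
 * rar_entries == 128 - (7 + 1) == 120 slots for secondary unicast
 * addresses; the (vfn + 1) reservation appears to cover the PF's
 * primary MAC plus one RAR entry per VF.
 */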
3462
3463/**
Christopher Leech2c5645c2008-08-26 04:27:02 -07003464 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
Auke Kok9a799d72007-09-15 14:07:45 -07003465 * @netdev: network interface device structure
3466 *
Christopher Leech2c5645c2008-08-26 04:27:02 -07003467 * The set_rx_method entry point is called whenever the unicast/multicast
3468 * address list or the network interface flags are updated. This routine is
3469 * responsible for configuring the hardware for proper unicast, multicast and
3470 * promiscuous mode.
Auke Kok9a799d72007-09-15 14:07:45 -07003471 **/
Greg Rose7f870472010-01-09 02:25:29 +00003472void ixgbe_set_rx_mode(struct net_device *netdev)
Auke Kok9a799d72007-09-15 14:07:45 -07003473{
3474 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3475 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck28500622010-06-15 09:25:48 +00003476 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
3477 int count;
Auke Kok9a799d72007-09-15 14:07:45 -07003478
3479 /* Check for Promiscuous and All Multicast modes */
3480
3481 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3482
Alexander Duyckf5dc4422010-08-19 13:36:49 +00003483 /* set all bits that we expect to always be set */
3484 fctrl |= IXGBE_FCTRL_BAM;
3485 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
3486 fctrl |= IXGBE_FCTRL_PMCF;
3487
Alexander Duyck28500622010-06-15 09:25:48 +00003488 /* clear the bits we are changing the status of */
3489 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3490
Auke Kok9a799d72007-09-15 14:07:45 -07003491 if (netdev->flags & IFF_PROMISC) {
Emil Tantilove433ea12010-05-13 17:33:00 +00003492 hw->addr_ctrl.user_set_promisc = true;
Auke Kok9a799d72007-09-15 14:07:45 -07003493 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
Alexander Duyck28500622010-06-15 09:25:48 +00003494 vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003495 /* don't hardware filter vlans in promisc mode */
3496 ixgbe_vlan_filter_disable(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003497 } else {
Patrick McHardy746b9f02008-07-16 20:15:45 -07003498 if (netdev->flags & IFF_ALLMULTI) {
3499 fctrl |= IXGBE_FCTRL_MPE;
Alexander Duyck28500622010-06-15 09:25:48 +00003500 vmolr |= IXGBE_VMOLR_MPE;
3501 } else {
3502 /*
3503			 * Write addresses to the MTA; if the attempt fails,
3504			 * then we should just turn on promiscuous mode so
3505			 * that we can at least receive multicast traffic
3506 */
3507 hw->mac.ops.update_mc_addr_list(hw, netdev);
3508 vmolr |= IXGBE_VMOLR_ROMPE;
Patrick McHardy746b9f02008-07-16 20:15:45 -07003509 }
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003510 ixgbe_vlan_filter_enable(adapter);
Emil Tantilove433ea12010-05-13 17:33:00 +00003511 hw->addr_ctrl.user_set_promisc = false;
Alexander Duyck28500622010-06-15 09:25:48 +00003512 /*
3513		 * Write addresses to available RAR registers; if there is not
3514		 * sufficient space to store all the addresses, then enable
3515		 * unicast promiscuous mode
3516 */
3517 count = ixgbe_write_uc_addr_list(netdev);
3518 if (count < 0) {
3519 fctrl |= IXGBE_FCTRL_UPE;
3520 vmolr |= IXGBE_VMOLR_ROPE;
3521 }
3522 }
3523
3524 if (adapter->num_vfs) {
3525 ixgbe_restore_vf_multicasts(adapter);
3526 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
3527 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
3528 IXGBE_VMOLR_ROPE);
3529 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
Auke Kok9a799d72007-09-15 14:07:45 -07003530 }
3531
3532 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
Jesse Grossf62bbb52010-10-20 13:56:10 +00003533
3534 if (netdev->features & NETIF_F_HW_VLAN_RX)
3535 ixgbe_vlan_strip_enable(adapter);
3536 else
3537 ixgbe_vlan_strip_disable(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003538}
3539
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003540static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
3541{
3542 int q_idx;
3543 struct ixgbe_q_vector *q_vector;
3544 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3545
3546 /* legacy and MSI only use one vector */
3547 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
3548 q_vectors = 1;
3549
3550 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
Jesse Brandeburgf0848272008-09-11 19:59:42 -07003551 struct napi_struct *napi;
Alexander Duyck7a921c92009-05-06 10:43:28 +00003552 q_vector = adapter->q_vector[q_idx];
Jesse Brandeburgf0848272008-09-11 19:59:42 -07003553 napi = &q_vector->napi;
Alexander Duyck91281fd2009-06-04 16:00:27 +00003554 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3555 if (!q_vector->rxr_count || !q_vector->txr_count) {
3556 if (q_vector->txr_count == 1)
3557 napi->poll = &ixgbe_clean_txonly;
3558 else if (q_vector->rxr_count == 1)
3559 napi->poll = &ixgbe_clean_rxonly;
3560 }
3561 }
Jesse Brandeburgf0848272008-09-11 19:59:42 -07003562
3563 napi_enable(napi);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003564 }
3565}
3566
3567static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
3568{
3569 int q_idx;
3570 struct ixgbe_q_vector *q_vector;
3571 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3572
3573 /* legacy and MSI only use one vector */
3574 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
3575 q_vectors = 1;
3576
3577 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
Alexander Duyck7a921c92009-05-06 10:43:28 +00003578 q_vector = adapter->q_vector[q_idx];
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003579 napi_disable(&q_vector->napi);
3580 }
3581}
3582
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08003583#ifdef CONFIG_IXGBE_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -08003584/*
3585 * ixgbe_configure_dcb - Configure DCB hardware
3586 * @adapter: ixgbe adapter struct
3587 *
3588 * This is called by the driver on open to configure the DCB hardware.
3589 * This is also called by the gennetlink interface when reconfiguring
3590 * the DCB state.
3591 */
3592static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3593{
3594 struct ixgbe_hw *hw = &adapter->hw;
John Fastabend98063072010-10-28 00:59:57 +00003595 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
Alexander Duyck2f90b862008-11-20 20:52:10 -08003596
Alexander Duyck67ebd792010-08-19 13:34:04 +00003597 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
3598 if (hw->mac.type == ixgbe_mac_82598EB)
3599 netif_set_gso_max_size(adapter->netdev, 65536);
3600 return;
3601 }
3602
3603 if (hw->mac.type == ixgbe_mac_82598EB)
3604 netif_set_gso_max_size(adapter->netdev, 32768);
3605
John Fastabend98063072010-10-28 00:59:57 +00003606#ifdef CONFIG_FCOE
3607 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
3608 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
3609#endif
3610
John Fastabend80ab1932010-11-16 19:26:45 -08003611 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
John Fastabend98063072010-10-28 00:59:57 +00003612 DCB_TX_CONFIG);
John Fastabend80ab1932010-11-16 19:26:45 -08003613 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
John Fastabend98063072010-10-28 00:59:57 +00003614 DCB_RX_CONFIG);
Alexander Duyck2f90b862008-11-20 20:52:10 -08003615
Alexander Duyck2f90b862008-11-20 20:52:10 -08003616 /* Enable VLAN tag insert/strip */
Jesse Grossf62bbb52010-10-20 13:56:10 +00003617 adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003618
Alexander Duyck2f90b862008-11-20 20:52:10 -08003619 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
Alexander Duyck01fa7d92010-11-16 19:26:53 -08003620
3621 /* reconfigure the hardware */
3622 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
Alexander Duyck2f90b862008-11-20 20:52:10 -08003623}
3624
3625#endif
Auke Kok9a799d72007-09-15 14:07:45 -07003626static void ixgbe_configure(struct ixgbe_adapter *adapter)
3627{
3628 struct net_device *netdev = adapter->netdev;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003629 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07003630 int i;
3631
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08003632#ifdef CONFIG_IXGBE_DCB
Alexander Duyck67ebd792010-08-19 13:34:04 +00003633 ixgbe_configure_dcb(adapter);
Alexander Duyck2f90b862008-11-20 20:52:10 -08003634#endif
Auke Kok9a799d72007-09-15 14:07:45 -07003635
Jesse Grossf62bbb52010-10-20 13:56:10 +00003636 ixgbe_set_rx_mode(netdev);
3637 ixgbe_restore_vlan(adapter);
3638
Yi Zoueacd73f2009-05-13 13:11:06 +00003639#ifdef IXGBE_FCOE
3640 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
3641 ixgbe_configure_fcoe(adapter);
3642
3643#endif /* IXGBE_FCOE */
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003644 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3645 for (i = 0; i < adapter->num_tx_queues; i++)
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00003646 adapter->tx_ring[i]->atr_sample_rate =
Joe Perchese8e9f692010-09-07 21:34:53 +00003647 adapter->atr_sample_rate;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003648 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
3649 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
3650 ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
3651 }
Alexander Duyck933d41f2010-09-07 21:34:29 +00003652 ixgbe_configure_virtualization(adapter);
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003653
Auke Kok9a799d72007-09-15 14:07:45 -07003654 ixgbe_configure_tx(adapter);
3655 ixgbe_configure_rx(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003656}
3657
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003658static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
3659{
3660 switch (hw->phy.type) {
3661 case ixgbe_phy_sfp_avago:
3662 case ixgbe_phy_sfp_ftl:
3663 case ixgbe_phy_sfp_intel:
3664 case ixgbe_phy_sfp_unknown:
Don Skidmoreea0a04d2010-05-18 16:00:13 +00003665 case ixgbe_phy_sfp_passive_tyco:
3666 case ixgbe_phy_sfp_passive_unknown:
3667 case ixgbe_phy_sfp_active_unknown:
3668 case ixgbe_phy_sfp_ftl_active:
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003669 return true;
3670 default:
3671 return false;
3672 }
3673}
3674
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08003675/**
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003676 * ixgbe_sfp_link_config - set up SFP+ link
3677 * @adapter: pointer to private adapter struct
3678 **/
3679static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
3680{
3681 struct ixgbe_hw *hw = &adapter->hw;
3682
3683 if (hw->phy.multispeed_fiber) {
3684 /*
3685 * In multispeed fiber setups, the device may not have
3686 * had a physical connection when the driver loaded.
3687 * If that's the case, the initial link configuration
3688 * couldn't get the MAC into 10G or 1G mode, so we'll
3689 * never have a link status change interrupt fire.
3690 * We need to try and force an autonegotiation
3691 * session, then bring up link.
3692 */
3693 hw->mac.ops.setup_sfp(hw);
3694 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
3695 schedule_work(&adapter->multispeed_fiber_task);
3696 } else {
3697 /*
3698 * Direct Attach Cu and non-multispeed fiber modules
3699 * still need to be configured properly prior to
3700 * attempting link.
3701 */
3702 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK))
3703 schedule_work(&adapter->sfp_config_module_task);
3704 }
3705}
3706
3707/**
3708 * ixgbe_non_sfp_link_config - set up non-SFP+ link
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08003709 * @hw: pointer to private hardware struct
3710 *
3711 * Returns 0 on success, negative on failure
3712 **/
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003713static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08003714{
3715 u32 autoneg;
Mallikarjuna R Chilakala8620a102009-09-01 13:49:35 +00003716 bool negotiation, link_up = false;
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08003717 u32 ret = IXGBE_ERR_LINK_SETUP;
3718
3719 if (hw->mac.ops.check_link)
3720 ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
3721
3722 if (ret)
3723 goto link_cfg_out;
3724
3725 if (hw->mac.ops.get_link_capabilities)
Joe Perchese8e9f692010-09-07 21:34:53 +00003726 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
3727 &negotiation);
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08003728 if (ret)
3729 goto link_cfg_out;
3730
Mallikarjuna R Chilakala8620a102009-09-01 13:49:35 +00003731 if (hw->mac.ops.setup_link)
3732 ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08003733link_cfg_out:
3734 return ret;
3735}
3736
Alexander Duycka34bcff2010-08-19 13:39:20 +00003737static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
Auke Kok9a799d72007-09-15 14:07:45 -07003738{
Auke Kok9a799d72007-09-15 14:07:45 -07003739 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duycka34bcff2010-08-19 13:39:20 +00003740 u32 gpie = 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003741
Jesse Brandeburg9b471442009-12-03 11:33:54 +00003742 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
Alexander Duycka34bcff2010-08-19 13:39:20 +00003743 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
3744 IXGBE_GPIE_OCD;
3745 gpie |= IXGBE_GPIE_EIAME;
Jesse Brandeburg9b471442009-12-03 11:33:54 +00003746 /*
3747 * use EIAM to auto-mask when MSI-X interrupt is asserted
3748 * this saves a register write for every interrupt
3749 */
3750 switch (hw->mac.type) {
3751 case ixgbe_mac_82598EB:
3752 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3753 break;
Jesse Brandeburg9b471442009-12-03 11:33:54 +00003754 case ixgbe_mac_82599EB:
Don Skidmoreb93a2222010-11-16 19:27:17 -08003755 case ixgbe_mac_X540:
3756 default:
Jesse Brandeburg9b471442009-12-03 11:33:54 +00003757 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3758 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3759 break;
3760 }
3761 } else {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003762 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
3763 * specifically only auto mask tx and rx interrupts */
3764 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
Auke Kok9a799d72007-09-15 14:07:45 -07003765 }
3766
Alexander Duycka34bcff2010-08-19 13:39:20 +00003767 /* XXX: to interrupt immediately for EICS writes, enable this */
3768 /* gpie |= IXGBE_GPIE_EIMEN; */
3769
3770 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3771 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
3772 gpie |= IXGBE_GPIE_VTMODE_64;
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07003773 }
3774
Alexander Duycka34bcff2010-08-19 13:39:20 +00003775 /* Enable fan failure interrupt */
3776 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
Jesse Brandeburg0befdb32008-10-31 00:46:40 -07003777 gpie |= IXGBE_SDP1_GPIEN;
Jesse Brandeburg0befdb32008-10-31 00:46:40 -07003778
Alexander Duycka34bcff2010-08-19 13:39:20 +00003779	if (hw->mac.type == ixgbe_mac_82599EB) {
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003780		gpie |= IXGBE_SDP1_GPIEN;
3781		gpie |= IXGBE_SDP2_GPIEN;
	}
Alexander Duycka34bcff2010-08-19 13:39:20 +00003782
3783 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3784}
3785
3786static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3787{
3788 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duycka34bcff2010-08-19 13:39:20 +00003789 int err;
Alexander Duycka34bcff2010-08-19 13:39:20 +00003790 u32 ctrl_ext;
3791
3792 ixgbe_get_hw_control(adapter);
3793 ixgbe_setup_gpie(adapter);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003794
Auke Kok9a799d72007-09-15 14:07:45 -07003795 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3796 ixgbe_configure_msix(adapter);
3797 else
3798 ixgbe_configure_msi_and_legacy(adapter);
3799
Don Skidmorec6ecf392010-12-03 03:31:51 +00003800	/* enable the optics for both multispeed fiber and 82599 SFP+ fiber */
3801	if (hw->mac.ops.enable_tx_laser &&
3802	    ((hw->phy.multispeed_fiber) ||
3803	     ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
3804	      (hw->mac.type == ixgbe_mac_82599EB))))
Peter Waskiewicz61fac742010-04-27 00:38:15 +00003805 hw->mac.ops.enable_tx_laser(hw);
3806
Auke Kok9a799d72007-09-15 14:07:45 -07003807 clear_bit(__IXGBE_DOWN, &adapter->state);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003808 ixgbe_napi_enable_all(adapter);
3809
Alexander Duyck73c4b7c2010-11-16 19:26:57 -08003810 if (ixgbe_is_sfp(hw)) {
3811 ixgbe_sfp_link_config(adapter);
3812 } else {
3813 err = ixgbe_non_sfp_link_config(hw);
3814 if (err)
3815 e_err(probe, "link_config FAILED %d\n", err);
3816 }
3817
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003818 /* clear any pending interrupts, may auto mask */
3819 IXGBE_READ_REG(hw, IXGBE_EICR);
Emil Tantilov6af3b9e2010-09-29 21:35:23 +00003820 ixgbe_irq_enable(adapter, true, true);
Auke Kok9a799d72007-09-15 14:07:45 -07003821
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003822 /*
Don Skidmorebf069c92009-05-07 10:39:54 +00003823 * If this adapter has a fan, check to see if we had a failure
3824 * before we enabled the interrupt.
3825 */
3826 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
3827 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
3828 if (esdp & IXGBE_ESDP_SDP1)
Emil Tantilov396e7992010-07-01 20:05:12 +00003829 e_crit(drv, "Fan has stopped, replace the adapter\n");
Don Skidmorebf069c92009-05-07 10:39:54 +00003830 }
3831
3832 /*
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003833 * For hot-pluggable SFP+ devices, a new SFP+ module may have
Don Skidmore19343de2009-07-02 12:50:31 +00003834 * arrived before interrupts were enabled but after probe. Such
3835 * devices wouldn't have their type identified yet. We need to
3836 * kick off the SFP+ module setup first, then try to bring up link.
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003837 * If we're not hot-pluggable SFP+, we just need to configure link
3838 * and bring it up.
3839 */
Alexander Duyck73c4b7c2010-11-16 19:26:57 -08003840 if (hw->phy.type == ixgbe_phy_unknown)
3841 schedule_work(&adapter->sfp_config_module_task);
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08003842
Peter P Waskiewicz Jr1da100b2009-01-19 16:55:03 -08003843 /* enable transmits */
Alexander Duyck477de6e2010-08-19 13:38:11 +00003844 netif_tx_start_all_queues(adapter->netdev);
Peter P Waskiewicz Jr1da100b2009-01-19 16:55:03 -08003845
Auke Kok9a799d72007-09-15 14:07:45 -07003846 /* bring the link up in the watchdog, this could race with our first
3847 * link up interrupt but shouldn't be a problem */
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07003848 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
3849 adapter->link_check_timeout = jiffies;
Auke Kok9a799d72007-09-15 14:07:45 -07003850 mod_timer(&adapter->watchdog_timer, jiffies);
Greg Rosec9205692010-01-22 22:46:22 +00003851
3852 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
3853 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3854 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3855 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3856
Auke Kok9a799d72007-09-15 14:07:45 -07003857 return 0;
3858}
3859
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08003860void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
3861{
3862 WARN_ON(in_interrupt());
3863 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
3864 msleep(1);
3865 ixgbe_down(adapter);
Greg Rose5809a1a2010-03-24 09:36:08 +00003866 /*
3867 * If SR-IOV enabled then wait a bit before bringing the adapter
3868 * back up to give the VFs time to respond to the reset. The
3869 * two second wait is based upon the watchdog timer cycle in
3870 * the VF driver.
3871 */
3872 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3873 msleep(2000);
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08003874 ixgbe_up(adapter);
3875 clear_bit(__IXGBE_RESETTING, &adapter->state);
3876}
3877
Auke Kok9a799d72007-09-15 14:07:45 -07003878int ixgbe_up(struct ixgbe_adapter *adapter)
3879{
3880 /* hardware has been reset, we need to reload some things */
3881 ixgbe_configure(adapter);
3882
3883 return ixgbe_up_complete(adapter);
3884}
3885
3886void ixgbe_reset(struct ixgbe_adapter *adapter)
3887{
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07003888 struct ixgbe_hw *hw = &adapter->hw;
Don Skidmore8ca783a2009-05-26 20:40:47 -07003889 int err;
3890
3891 err = hw->mac.ops.init_hw(hw);
Peter P Waskiewicz Jrda4dd0f2009-06-04 11:10:35 +00003892 switch (err) {
3893 case 0:
3894 case IXGBE_ERR_SFP_NOT_PRESENT:
3895 break;
3896 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
Emil Tantilov849c4542010-06-03 16:53:41 +00003897 e_dev_err("master disable timed out\n");
Peter P Waskiewicz Jrda4dd0f2009-06-04 11:10:35 +00003898 break;
Peter P Waskiewicz Jr794caeb2009-06-04 16:02:24 +00003899 case IXGBE_ERR_EEPROM_VERSION:
3900 /* We are running on a pre-production device, log a warning */
Emil Tantilov849c4542010-06-03 16:53:41 +00003901 e_dev_warn("This device is a pre-production adapter/LOM. "
3902 "Please be aware there may be issuesassociated with "
3903 "your hardware. If you are experiencing problems "
3904 "please contact your Intel or hardware "
3905 "representative who provided you with this "
3906 "hardware.\n");
Peter P Waskiewicz Jr794caeb2009-06-04 16:02:24 +00003907 break;
Peter P Waskiewicz Jrda4dd0f2009-06-04 11:10:35 +00003908 default:
Emil Tantilov849c4542010-06-03 16:53:41 +00003909 e_dev_err("Hardware Error: %d\n", err);
Peter P Waskiewicz Jrda4dd0f2009-06-04 11:10:35 +00003910 }
Auke Kok9a799d72007-09-15 14:07:45 -07003911
3912 /* reprogram the RAR[0] in case user changed it. */
Greg Rose1cdd1ec2010-01-09 02:26:46 +00003913 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
3914 IXGBE_RAH_AV);
Auke Kok9a799d72007-09-15 14:07:45 -07003915}
3916
Auke Kok9a799d72007-09-15 14:07:45 -07003917/**
3918 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
Auke Kok9a799d72007-09-15 14:07:45 -07003919 * @rx_ring: ring to free buffers from
3920 **/
Alexander Duyckb6ec8952010-11-16 19:26:49 -08003921static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07003922{
Alexander Duyckb6ec8952010-11-16 19:26:49 -08003923 struct device *dev = rx_ring->dev;
Auke Kok9a799d72007-09-15 14:07:45 -07003924 unsigned long size;
Alexander Duyckb6ec8952010-11-16 19:26:49 -08003925 u16 i;
Auke Kok9a799d72007-09-15 14:07:45 -07003926
Alexander Duyck84418e32010-08-19 13:40:54 +00003927 /* ring already cleared, nothing to do */
3928 if (!rx_ring->rx_buffer_info)
3929 return;
Auke Kok9a799d72007-09-15 14:07:45 -07003930
Alexander Duyck84418e32010-08-19 13:40:54 +00003931 /* Free all the Rx ring sk_buffs */
Auke Kok9a799d72007-09-15 14:07:45 -07003932 for (i = 0; i < rx_ring->count; i++) {
3933 struct ixgbe_rx_buffer *rx_buffer_info;
3934
3935 rx_buffer_info = &rx_ring->rx_buffer_info[i];
3936 if (rx_buffer_info->dma) {
Alexander Duyckb6ec8952010-11-16 19:26:49 -08003937 dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
Joe Perchese8e9f692010-09-07 21:34:53 +00003938 rx_ring->rx_buf_len,
Nick Nunley1b507732010-04-27 13:10:27 +00003939 DMA_FROM_DEVICE);
Auke Kok9a799d72007-09-15 14:07:45 -07003940 rx_buffer_info->dma = 0;
3941 }
3942 if (rx_buffer_info->skb) {
Alexander Duyckf8212f92009-04-27 22:42:37 +00003943 struct sk_buff *skb = rx_buffer_info->skb;
Auke Kok9a799d72007-09-15 14:07:45 -07003944 rx_buffer_info->skb = NULL;
Alexander Duyckf8212f92009-04-27 22:42:37 +00003945 do {
3946 struct sk_buff *this = skb;
Mallikarjuna R Chilakalae8171aa2010-05-13 17:33:21 +00003947 if (IXGBE_RSC_CB(this)->delay_unmap) {
Alexander Duyckb6ec8952010-11-16 19:26:49 -08003948 dma_unmap_single(dev,
Nick Nunley1b507732010-04-27 13:10:27 +00003949 IXGBE_RSC_CB(this)->dma,
Joe Perchese8e9f692010-09-07 21:34:53 +00003950 rx_ring->rx_buf_len,
Nick Nunley1b507732010-04-27 13:10:27 +00003951 DMA_FROM_DEVICE);
Mallikarjuna R Chilakalafd3686a2010-03-19 04:41:33 +00003952 IXGBE_RSC_CB(this)->dma = 0;
Mallikarjuna R Chilakalae8171aa2010-05-13 17:33:21 +00003953 IXGBE_RSC_CB(skb)->delay_unmap = false;
Mallikarjuna R Chilakalafd3686a2010-03-19 04:41:33 +00003954 }
Alexander Duyckf8212f92009-04-27 22:42:37 +00003955 skb = skb->prev;
3956 dev_kfree_skb(this);
3957 } while (skb);
Auke Kok9a799d72007-09-15 14:07:45 -07003958 }
3959 if (!rx_buffer_info->page)
3960 continue;
Jesse Brandeburg4f57ca62009-06-30 11:44:56 +00003961 if (rx_buffer_info->page_dma) {
Alexander Duyckb6ec8952010-11-16 19:26:49 -08003962 dma_unmap_page(dev, rx_buffer_info->page_dma,
Nick Nunley1b507732010-04-27 13:10:27 +00003963 PAGE_SIZE / 2, DMA_FROM_DEVICE);
Jesse Brandeburg4f57ca62009-06-30 11:44:56 +00003964 rx_buffer_info->page_dma = 0;
3965 }
Auke Kok9a799d72007-09-15 14:07:45 -07003966 put_page(rx_buffer_info->page);
3967 rx_buffer_info->page = NULL;
Jesse Brandeburg762f4c52008-09-11 19:58:43 -07003968 rx_buffer_info->page_offset = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07003969 }
3970
3971 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
3972 memset(rx_ring->rx_buffer_info, 0, size);
3973
3974 /* Zero out the descriptor ring */
3975 memset(rx_ring->desc, 0, rx_ring->size);
3976
3977 rx_ring->next_to_clean = 0;
3978 rx_ring->next_to_use = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07003979}
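
/*
 * Illustrative sketch, not driver code: the RSC branch above walks a
 * chain of buffers via skb->prev, saving the link before freeing the
 * current node. The same save-then-free pattern for a plain singly
 * linked list is shown below; struct node and free_chain() are
 * hypothetical names, not ixgbe API.
 */
#if 0
#include <stdlib.h>

struct node {
	struct node *prev;		/* next buffer in the chain */
	char payload[64];
};

static void free_chain(struct node *n)
{
	while (n) {
		struct node *this = n;

		n = n->prev;		/* grab the link first... */
		free(this);		/* ...then free; 'this' is now gone */
	}
}
#endif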
3980
3981/**
3982 * ixgbe_clean_tx_ring - Free Tx Buffers
Auke Kok9a799d72007-09-15 14:07:45 -07003983 * @tx_ring: ring to be cleaned
3984 **/
Alexander Duyckb6ec8952010-11-16 19:26:49 -08003985static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07003986{
3987 struct ixgbe_tx_buffer *tx_buffer_info;
3988 unsigned long size;
Alexander Duyckb6ec8952010-11-16 19:26:49 -08003989 u16 i;
Auke Kok9a799d72007-09-15 14:07:45 -07003990
Alexander Duyck84418e32010-08-19 13:40:54 +00003991 /* ring already cleared, nothing to do */
3992 if (!tx_ring->tx_buffer_info)
3993 return;
Auke Kok9a799d72007-09-15 14:07:45 -07003994
Alexander Duyck84418e32010-08-19 13:40:54 +00003995 /* Free all the Tx ring sk_buffs */
Auke Kok9a799d72007-09-15 14:07:45 -07003996 for (i = 0; i < tx_ring->count; i++) {
3997 tx_buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyckb6ec8952010-11-16 19:26:49 -08003998 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
Auke Kok9a799d72007-09-15 14:07:45 -07003999 }
4000
4001 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
4002 memset(tx_ring->tx_buffer_info, 0, size);
4003
4004 /* Zero out the descriptor ring */
4005 memset(tx_ring->desc, 0, tx_ring->size);
4006
4007 tx_ring->next_to_use = 0;
4008 tx_ring->next_to_clean = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07004009}
4010
4011/**
Auke Kok9a799d72007-09-15 14:07:45 -07004012 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
4013 * @adapter: board private structure
4014 **/
4015static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
4016{
4017 int i;
4018
4019 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004020 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
Auke Kok9a799d72007-09-15 14:07:45 -07004021}
4022
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004023/**
4024 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
4025 * @adapter: board private structure
4026 **/
4027static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
4028{
4029 int i;
4030
4031 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004032 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004033}
4034
Auke Kok9a799d72007-09-15 14:07:45 -07004035void ixgbe_down(struct ixgbe_adapter *adapter)
4036{
4037 struct net_device *netdev = adapter->netdev;
Jesse Brandeburg7f821872008-09-11 20:00:16 -07004038 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07004039 u32 rxctrl;
Jesse Brandeburg7f821872008-09-11 20:00:16 -07004040 u32 txdctl;
Alexander Duyckbf29ee62010-11-16 19:27:07 -08004041 int i;
Peter Waskiewiczb25ebfd2010-10-05 01:27:49 +00004042 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
Auke Kok9a799d72007-09-15 14:07:45 -07004043
4044 /* signal that we are down to the interrupt handler */
4045 set_bit(__IXGBE_DOWN, &adapter->state);
4046
Greg Rose767081a2010-01-22 22:46:40 +00004047 /* disable receive for all VFs and wait one second */
4048 if (adapter->num_vfs) {
Greg Rose767081a2010-01-22 22:46:40 +00004049 /* ping all the active vfs to let them know we are going down */
4050 ixgbe_ping_all_vfs(adapter);
Greg Rose581d1aa2010-03-24 09:36:27 +00004051
Greg Rose767081a2010-01-22 22:46:40 +00004052 /* Disable all VFTE/VFRE TX/RX */
4053 ixgbe_disable_tx_rx(adapter);
Greg Rose581d1aa2010-03-24 09:36:27 +00004054
4055 /* Mark all the VFs as inactive */
4056 for (i = 0 ; i < adapter->num_vfs; i++)
4057 adapter->vfinfo[i].clear_to_send = 0;
Greg Rose767081a2010-01-22 22:46:40 +00004058 }
4059
Auke Kok9a799d72007-09-15 14:07:45 -07004060 /* disable receives */
Jesse Brandeburg7f821872008-09-11 20:00:16 -07004061 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4062 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
Auke Kok9a799d72007-09-15 14:07:45 -07004063
Jesse Brandeburg7f821872008-09-11 20:00:16 -07004064 IXGBE_WRITE_FLUSH(hw);
Auke Kok9a799d72007-09-15 14:07:45 -07004065 msleep(10);
4066
Jesse Brandeburg7f821872008-09-11 20:00:16 -07004067 netif_tx_stop_all_queues(netdev);
4068
Don Skidmore0a1f87c2009-09-18 09:45:43 +00004069 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
4070 del_timer_sync(&adapter->sfp_timer);
Auke Kok9a799d72007-09-15 14:07:45 -07004071 del_timer_sync(&adapter->watchdog_timer);
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07004072 cancel_work_sync(&adapter->watchdog_task);
Auke Kok9a799d72007-09-15 14:07:45 -07004073
John Fastabendc0dfb902010-04-27 02:13:39 +00004074 netif_carrier_off(netdev);
4075 netif_tx_disable(netdev);
4076
4077 ixgbe_irq_disable(adapter);
4078
4079 ixgbe_napi_disable_all(adapter);
4080
Peter Waskiewiczb25ebfd2010-10-05 01:27:49 +00004081 /* Cleanup the affinity_hint CPU mask memory and callback */
4082 for (i = 0; i < num_q_vectors; i++) {
4083 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
4084 /* clear the affinity_mask in the IRQ descriptor */
4085 irq_set_affinity_hint(adapter->msix_entries[i].vector, NULL);
4086 /* release the CPU mask memory */
4087 free_cpumask_var(q_vector->affinity_mask);
4088 }
4089
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00004090 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
4091 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
4092 cancel_work_sync(&adapter->fdir_reinit_task);
4093
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07004094 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
4095 cancel_work_sync(&adapter->check_overtemp_task);
4096
Jesse Brandeburg7f821872008-09-11 20:00:16 -07004097 /* disable transmits in the hardware now that interrupts are off */
4098 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyckbf29ee62010-11-16 19:27:07 -08004099 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
4100 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
4101 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
Joe Perchese8e9f692010-09-07 21:34:53 +00004102 (txdctl & ~IXGBE_TXDCTL_ENABLE));
Jesse Brandeburg7f821872008-09-11 20:00:16 -07004103 }
PJ Waskiewicz88512532009-03-13 22:15:10 +00004104 /* Disable the Tx DMA engine on 82599 */
Alexander Duyckbd508172010-11-16 19:27:03 -08004105 switch (hw->mac.type) {
4106 case ixgbe_mac_82599EB:
Don Skidmoreb93a2222010-11-16 19:27:17 -08004107 case ixgbe_mac_X540:
PJ Waskiewicz88512532009-03-13 22:15:10 +00004108 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
Joe Perchese8e9f692010-09-07 21:34:53 +00004109 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
4110 ~IXGBE_DMATXCTL_TE));
Alexander Duyckbd508172010-11-16 19:27:03 -08004111 break;
4112 default:
4113 break;
4114 }
Jesse Brandeburg7f821872008-09-11 20:00:16 -07004115
Peter Waskiewicz9a713e72010-02-10 16:07:54 +00004116 /* clear n-tuple filters that are cached */
4117 ethtool_ntuple_flush(netdev);
4118
Paul Larson6f4a0e42008-06-24 17:00:56 -07004119 if (!pci_channel_offline(adapter->pdev))
4120 ixgbe_reset(adapter);
Don Skidmorec6ecf392010-12-03 03:31:51 +00004121
4122 /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
4123 if (hw->mac.ops.disable_tx_laser &&
4124 ((hw->phy.multispeed_fiber) ||
4125 ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
4126 (hw->mac.type == ixgbe_mac_82599EB))))
4127 hw->mac.ops.disable_tx_laser(hw);
4128
Auke Kok9a799d72007-09-15 14:07:45 -07004129 ixgbe_clean_all_tx_rings(adapter);
4130 ixgbe_clean_all_rx_rings(adapter);
4131
Jeff Garzik5dd2d332008-10-16 05:09:31 -04004132#ifdef CONFIG_IXGBE_DCA
Jesse Brandeburg96b0e0f2008-08-26 04:27:21 -07004133 /* since we reset the hardware DCA settings were cleared */
Alexander Duycke35ec122009-05-21 13:07:12 +00004134 ixgbe_setup_dca(adapter);
Jesse Brandeburg96b0e0f2008-08-26 04:27:21 -07004135#endif
Auke Kok9a799d72007-09-15 14:07:45 -07004136}
4137
Auke Kok9a799d72007-09-15 14:07:45 -07004138/**
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004139 * ixgbe_poll - NAPI Rx polling callback
4140 * @napi: structure for representing this polling device
4141 * @budget: how many packets driver is allowed to clean
4142 *
4143 * This function is used for legacy and MSI, NAPI mode
Auke Kok9a799d72007-09-15 14:07:45 -07004144 **/
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004145static int ixgbe_poll(struct napi_struct *napi, int budget)
Auke Kok9a799d72007-09-15 14:07:45 -07004146{
Jesse Brandeburg9a1a69ad2009-03-13 22:14:10 +00004147 struct ixgbe_q_vector *q_vector =
Joe Perchese8e9f692010-09-07 21:34:53 +00004148 container_of(napi, struct ixgbe_q_vector, napi);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004149 struct ixgbe_adapter *adapter = q_vector->adapter;
Jesse Brandeburg9a1a69ad2009-03-13 22:14:10 +00004150 int tx_clean_complete, work_done = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07004151
Jeff Garzik5dd2d332008-10-16 05:09:31 -04004152#ifdef CONFIG_IXGBE_DCA
Alexander Duyck33cf09c2010-11-16 19:26:55 -08004153 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
4154 ixgbe_update_dca(q_vector);
Jeb Cramerbd0362d2008-03-03 15:04:02 -08004155#endif
4156
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004157 tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
4158 ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget);
Auke Kok9a799d72007-09-15 14:07:45 -07004159
Jesse Brandeburg9a1a69ad2009-03-13 22:14:10 +00004160 if (!tx_clean_complete)
David S. Millerd2c7ddd2008-01-15 22:43:24 -08004161 work_done = budget;
4162
David S. Miller53e52c72008-01-07 21:06:12 -08004163 /* If budget not fully consumed, exit the polling mode */
4164 if (work_done < budget) {
Ben Hutchings288379f2009-01-19 16:43:59 -08004165 napi_complete(napi);
Nelson, Shannonf7554a22009-09-18 09:46:06 +00004166 if (adapter->rx_itr_setting & 1)
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08004167 ixgbe_set_itr(adapter);
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08004168 if (!test_bit(__IXGBE_DOWN, &adapter->state))
Nelson, Shannon835462f2009-04-27 22:42:54 +00004169 ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
Auke Kok9a799d72007-09-15 14:07:45 -07004170 }
Auke Kok9a799d72007-09-15 14:07:45 -07004171 return work_done;
4172}
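
/*
 * Illustrative sketch, not driver code: the NAPI budget contract that
 * ixgbe_poll() follows, modeled in plain C. clean_rx(),
 * napi_complete_model() and irq_rearm_model() are hypothetical
 * stand-ins for the real helpers. Cleaning fewer packets than the
 * budget means the ring is drained, so polling stops and interrupts
 * are re-armed; consuming the whole budget keeps the device on the
 * poll list.
 */
#if 0
extern int clean_rx(int budget);	/* returns packets cleaned, <= budget */
extern void napi_complete_model(void);
extern void irq_rearm_model(void);

static int poll_model(int budget)
{
	int work_done = clean_rx(budget);

	if (work_done < budget) {	/* ring drained: leave polling mode */
		napi_complete_model();
		irq_rearm_model();
	}
	return work_done;		/* == budget: stay on the poll list */
}
#endif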
4173
4174/**
4175 * ixgbe_tx_timeout - Respond to a Tx Hang
4176 * @netdev: network interface device structure
4177 **/
4178static void ixgbe_tx_timeout(struct net_device *netdev)
4179{
4180 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4181
John Fastabendc84d3242010-11-16 19:27:12 -08004182 adapter->tx_timeout_count++;
4183
Auke Kok9a799d72007-09-15 14:07:45 -07004184 /* Do the reset outside of interrupt context */
4185 schedule_work(&adapter->reset_task);
4186}
4187
4188static void ixgbe_reset_task(struct work_struct *work)
4189{
4190 struct ixgbe_adapter *adapter;
4191 adapter = container_of(work, struct ixgbe_adapter, reset_task);
4192
Alexander Duyck2f90b862008-11-20 20:52:10 -08004193 /* If we're already down or resetting, just bail */
4194 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
4195 test_bit(__IXGBE_RESETTING, &adapter->state))
4196 return;
4197
Taku Izumidcd79ae2010-04-27 14:39:53 +00004198 ixgbe_dump(adapter);
4199 netdev_err(adapter->netdev, "Reset adapter\n");
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08004200 ixgbe_reinit_locked(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07004201}
4202
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004203#ifdef CONFIG_IXGBE_DCB
4204static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
Jesse Brandeburgb9804972008-09-11 20:00:29 -07004205{
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004206 bool ret = false;
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00004207 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
Jesse Brandeburgb9804972008-09-11 20:00:29 -07004208
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00004209 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
4210 return ret;
4211
4212 f->mask = 0x7 << 3;
4213 adapter->num_rx_queues = f->indices;
4214 adapter->num_tx_queues = f->indices;
4215 ret = true;
Jesse Brandeburgb9804972008-09-11 20:00:29 -07004216
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004217 return ret;
4218}
4219#endif
4220
Jesse Brandeburg4df10462009-03-13 22:15:31 +00004221/**
4222 * ixgbe_set_rss_queues: Allocate queues for RSS
4223 * @adapter: board private structure to initialize
4224 *
4225 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
4226 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
4227 *
4228 **/
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004229static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
4230{
4231 bool ret = false;
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00004232 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004233
4234 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00004235 f->mask = 0xF;
4236 adapter->num_rx_queues = f->indices;
4237 adapter->num_tx_queues = f->indices;
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004238 ret = true;
4239 } else {
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004240 ret = false;
4241 }
4242
4243 return ret;
4244}
4245
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00004246/**
4247 * ixgbe_set_fdir_queues: Allocate queues for Flow Director
4248 * @adapter: board private structure to initialize
4249 *
4250 * Flow Director is an advanced Rx filter, attempting to get Rx flows back
4251 * to the original CPU that initiated the Tx session. This runs in addition
4252 * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
4253 * Rx load across CPUs using RSS.
4254 *
4255 **/
Joe Perchese8e9f692010-09-07 21:34:53 +00004256static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00004257{
4258 bool ret = false;
4259 struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
4260
4261 f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
4262 f_fdir->mask = 0;
4263
4264 /* Flow Director must have RSS enabled */
4265 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
4266 ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
4267 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)))) {
4268 adapter->num_tx_queues = f_fdir->indices;
4269 adapter->num_rx_queues = f_fdir->indices;
4270 ret = true;
4271 } else {
4272 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
4273 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
4274 }
4275 return ret;
4276}
4277
Yi Zou0331a832009-05-17 12:33:52 +00004278#ifdef IXGBE_FCOE
4279/**
4280 * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE)
4281 * @adapter: board private structure to initialize
4282 *
4283 * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
4284 * The ring feature mask is not used as a mask for FCoE, as it can take any 8
4285 * rx queues out of the max number of rx queues; instead, it is used as the
4286 * index of the first rx queue used by FCoE.
4287 *
4288 **/
4289static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
4290{
4291 bool ret = false;
4292 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
4293
4294 f->indices = min((int)num_online_cpus(), f->indices);
4295 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
Yi Zou8de8b2e2009-09-03 14:55:50 +00004296 adapter->num_rx_queues = 1;
4297 adapter->num_tx_queues = 1;
Yi Zou0331a832009-05-17 12:33:52 +00004298#ifdef CONFIG_IXGBE_DCB
4299 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
Emil Tantilov396e7992010-07-01 20:05:12 +00004300 e_info(probe, "FCoE enabled with DCB\n");
Yi Zou0331a832009-05-17 12:33:52 +00004301 ixgbe_set_dcb_queues(adapter);
4302 }
4303#endif
4304 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
Emil Tantilov396e7992010-07-01 20:05:12 +00004305 e_info(probe, "FCoE enabled with RSS\n");
Yi Zou8faa2a72009-07-09 02:29:50 +00004306 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4307 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
4308 ixgbe_set_fdir_queues(adapter);
4309 else
4310 ixgbe_set_rss_queues(adapter);
Yi Zou0331a832009-05-17 12:33:52 +00004311 }
4312 /* adding FCoE rx rings to the end */
4313 f->mask = adapter->num_rx_queues;
4314 adapter->num_rx_queues += f->indices;
Yi Zou8de8b2e2009-09-03 14:55:50 +00004315 adapter->num_tx_queues += f->indices;
Yi Zou0331a832009-05-17 12:33:52 +00004316
4317 ret = true;
4318 }
4319
4320 return ret;
4321}
4322
4323#endif /* IXGBE_FCOE */
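
/*
 * Illustrative sketch, not driver code: how the FCoE rings are appended
 * after the base queues in ixgbe_set_fcoe_queues() above, worked with
 * hypothetical numbers (16 RSS rx queues, 8 FCoE indices). f->mask
 * records the index of the first FCoE ring.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int num_rx_queues = 16;		/* from the RSS/FDIR setup */
	int fcoe_indices = 8;		/* FCRETA redirection table size */
	int fcoe_mask = num_rx_queues;	/* first rx ring used by FCoE */

	num_rx_queues += fcoe_indices;
	printf("FCoE rx rings occupy %d..%d of %d total\n",
	       fcoe_mask, fcoe_mask + fcoe_indices - 1, num_rx_queues);
	return 0;
}
#endif
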
Greg Rose1cdd1ec2010-01-09 02:26:46 +00004324/**
4325 * ixgbe_set_sriov_queues: Allocate queues for IOV use
4326 * @adapter: board private structure to initialize
4327 *
4328 * IOV doesn't actually use anything, so just NAK the
4329 * request for now and let the other queue routines
4330 * figure out what to do.
4331 */
4332static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
4333{
4334 return false;
4335}
4336
Jesse Brandeburg4df10462009-03-13 22:15:31 +00004337/*
4338 * ixgbe_set_num_queues: Allocate queues for device, feature dependent
4339 * @adapter: board private structure to initialize
4340 *
4341 * This is the top level queue allocation routine. The order here is very
4342 * important, starting with the "most" number of features turned on at once,
4343 * and ending with the smallest set of features. This way large combinations
4344 * can be allocated if they're turned on, and smaller combinations are the
4345 * fallthrough conditions.
4346 *
4347 **/
Ben Hutchings847f53f2010-09-27 08:28:56 +00004348static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004349{
Greg Rose1cdd1ec2010-01-09 02:26:46 +00004350 /* Start with base case */
4351 adapter->num_rx_queues = 1;
4352 adapter->num_tx_queues = 1;
4353 adapter->num_rx_pools = adapter->num_rx_queues;
4354 adapter->num_rx_queues_per_pool = 1;
4355
4356 if (ixgbe_set_sriov_queues(adapter))
Ben Hutchings847f53f2010-09-27 08:28:56 +00004357 goto done;
Greg Rose1cdd1ec2010-01-09 02:26:46 +00004358
Yi Zou0331a832009-05-17 12:33:52 +00004359#ifdef IXGBE_FCOE
4360 if (ixgbe_set_fcoe_queues(adapter))
4361 goto done;
4362
4363#endif /* IXGBE_FCOE */
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004364#ifdef CONFIG_IXGBE_DCB
4365 if (ixgbe_set_dcb_queues(adapter))
Wu Fengguangaf22ab12009-04-14 21:54:07 -07004366 goto done;
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004367
4368#endif
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00004369 if (ixgbe_set_fdir_queues(adapter))
4370 goto done;
4371
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004372 if (ixgbe_set_rss_queues(adapter))
Wu Fengguangaf22ab12009-04-14 21:54:07 -07004373 goto done;
4374
4375 /* fallback to base case */
4376 adapter->num_rx_queues = 1;
4377 adapter->num_tx_queues = 1;
4378
4379done:
Ben Hutchings847f53f2010-09-27 08:28:56 +00004380 /* Notify the stack of the (possibly) reduced queue counts. */
John Fastabendf0796d52010-07-01 13:21:57 +00004381 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
Ben Hutchings847f53f2010-09-27 08:28:56 +00004382 return netif_set_real_num_rx_queues(adapter->netdev,
4383 adapter->num_rx_queues);
Jesse Brandeburgb9804972008-09-11 20:00:29 -07004384}
4385
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004386static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
Joe Perchese8e9f692010-09-07 21:34:53 +00004387 int vectors)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004388{
4389 int err, vector_threshold;
4390
4391 /* We'll want at least 3 (vector_threshold):
4392 * 1) TxQ[0] Cleanup
4393 * 2) RxQ[0] Cleanup
4394 * 3) Other (Link Status Change, etc.)
4395 * 4) TCP Timer (optional)
4396 */
4397 vector_threshold = MIN_MSIX_COUNT;
4398
4399 /* The more we get, the more we will assign to Tx/Rx Cleanup
4400 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
4401 * Right now, we simply care about how many we'll get; we'll
4402 * set them up later while requesting irq's.
4403 */
4404 while (vectors >= vector_threshold) {
4405 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
Joe Perchese8e9f692010-09-07 21:34:53 +00004406 vectors);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004407 if (!err) /* Success in acquiring all requested vectors. */
4408 break;
4409 else if (err < 0)
4410 vectors = 0; /* Nasty failure, quit now */
4411 else /* err == number of vectors we should try again with */
4412 vectors = err;
4413 }
4414
4415 if (vectors < vector_threshold) {
4416 /* Can't allocate enough MSI-X interrupts? Oh well.
4417 * This just means we'll go with either a single MSI
4418 * vector or fall back to legacy interrupts.
4419 */
Emil Tantilov849c4542010-06-03 16:53:41 +00004420 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
4421 "Unable to allocate MSI-X interrupts\n");
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004422 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
4423 kfree(adapter->msix_entries);
4424 adapter->msix_entries = NULL;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004425 } else {
4426 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
Peter P Waskiewicz Jreb7f1392009-02-01 01:18:58 -08004427 /*
4428 * Adjust for only the vectors we'll use, which is minimum
4429 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
4430 * vectors we were allocated.
4431 */
4432 adapter->num_msix_vectors = min(vectors,
Joe Perchese8e9f692010-09-07 21:34:53 +00004433 adapter->max_msix_q_vectors + NON_Q_VECTORS);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004434 }
4435}
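
/*
 * Illustrative sketch, not driver code: the MSI-X negotiation loop
 * above, with a hypothetical try_enable() standing in for
 * pci_enable_msix(), which returns 0 on success, a negative errno on
 * hard failure, and otherwise the number of vectors the platform can
 * actually grant. The request shrinks to that count and is retried
 * until it would fall below the minimum.
 */
#if 0
extern int try_enable(int nvec);	/* 0 ok, <0 hard fail, >0 available */

static int negotiate_vectors(int wanted, int minimum)
{
	while (wanted >= minimum) {
		int err = try_enable(wanted);

		if (!err)
			return wanted;	/* got everything we asked for */
		if (err < 0)
			return -1;	/* nasty failure, quit now */
		wanted = err;		/* retry with what is available */
	}
	return -1;			/* too few to be useful */
}
#endif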
4436
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004437/**
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004438 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004439 * @adapter: board private structure to initialize
4440 *
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004441 * Cache the descriptor ring offsets for RSS to the assigned rings.
4442 *
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004443 **/
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004444static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004445{
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004446 int i;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004447
Alexander Duyck9d6b7582010-11-16 19:27:06 -08004448 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
4449 return false;
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004450
Alexander Duyck9d6b7582010-11-16 19:27:06 -08004451 for (i = 0; i < adapter->num_rx_queues; i++)
4452 adapter->rx_ring[i]->reg_idx = i;
4453 for (i = 0; i < adapter->num_tx_queues; i++)
4454 adapter->tx_ring[i]->reg_idx = i;
4455
4456 return true;
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004457}
4458
4459#ifdef CONFIG_IXGBE_DCB
4460/**
4461 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
4462 * @adapter: board private structure to initialize
4463 *
4464 * Cache the descriptor ring offsets for DCB to the assigned rings.
4465 *
4466 **/
4467static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
4468{
4469 int i;
4470 bool ret = false;
4471 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
4472
Alexander Duyckbd508172010-11-16 19:27:03 -08004473 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
4474 return false;
4475
4476 /* the number of queues is assumed to be symmetric */
4477 switch (adapter->hw.mac.type) {
4478 case ixgbe_mac_82598EB:
4479 for (i = 0; i < dcb_i; i++) {
4480 adapter->rx_ring[i]->reg_idx = i << 3;
4481 adapter->tx_ring[i]->reg_idx = i << 2;
4482 }
4483 ret = true;
4484 break;
4485 case ixgbe_mac_82599EB:
Don Skidmoreb93a2222010-11-16 19:27:17 -08004486 case ixgbe_mac_X540:
Alexander Duyckbd508172010-11-16 19:27:03 -08004487 if (dcb_i == 8) {
4488 /*
4489 * Tx TC0 starts at: descriptor queue 0
4490 * Tx TC1 starts at: descriptor queue 32
4491 * Tx TC2 starts at: descriptor queue 64
4492 * Tx TC3 starts at: descriptor queue 80
4493 * Tx TC4 starts at: descriptor queue 96
4494 * Tx TC5 starts at: descriptor queue 104
4495 * Tx TC6 starts at: descriptor queue 112
4496 * Tx TC7 starts at: descriptor queue 120
4497 *
4498 * Rx TC0-TC7 are offset by 16 queues each
4499 */
4500 for (i = 0; i < 3; i++) {
4501 adapter->tx_ring[i]->reg_idx = i << 5;
4502 adapter->rx_ring[i]->reg_idx = i << 4;
4503 }
4504 for ( ; i < 5; i++) {
4505 adapter->tx_ring[i]->reg_idx = ((i + 2) << 4);
4506 adapter->rx_ring[i]->reg_idx = i << 4;
4507 }
4508 for ( ; i < dcb_i; i++) {
4509 adapter->tx_ring[i]->reg_idx = ((i + 8) << 3);
4510 adapter->rx_ring[i]->reg_idx = i << 4;
Alexander Duyck2f90b862008-11-20 20:52:10 -08004511 }
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004512 ret = true;
Alexander Duyckbd508172010-11-16 19:27:03 -08004513 } else if (dcb_i == 4) {
4514 /*
4515 * Tx TC0 starts at: descriptor queue 0
4516 * Tx TC1 starts at: descriptor queue 64
4517 * Tx TC2 starts at: descriptor queue 96
4518 * Tx TC3 starts at: descriptor queue 112
4519 *
4520 * Rx TC0-TC3 are offset by 32 queues each
4521 */
4522 adapter->tx_ring[0]->reg_idx = 0;
4523 adapter->tx_ring[1]->reg_idx = 64;
4524 adapter->tx_ring[2]->reg_idx = 96;
4525 adapter->tx_ring[3]->reg_idx = 112;
4526 for (i = 0 ; i < dcb_i; i++)
4527 adapter->rx_ring[i]->reg_idx = i << 5;
4528 ret = true;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004529 }
Alexander Duyckbd508172010-11-16 19:27:03 -08004530 break;
4531 default:
4532 break;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004533 }
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004534 return ret;
4535}
4536#endif
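
/*
 * Illustrative sketch, not driver code: the shifts used in the 82599
 * 8-TC branch above reproduce the offsets listed in its comment --
 * Tx bases 0/32/64/80/96/104/112/120 and Rx bases in steps of 16.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int i;

	for (i = 0; i < 8; i++) {
		int tx;

		if (i < 3)
			tx = i << 5;		/* TC0-TC2: 32 queues each */
		else if (i < 5)
			tx = (i + 2) << 4;	/* TC3-TC4: 16 queues each */
		else
			tx = (i + 8) << 3;	/* TC5-TC7: 8 queues each */
		printf("TC%d: tx reg_idx %3d, rx reg_idx %3d\n", i, tx, i << 4);
	}
	return 0;
}
#endif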
4537
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00004538/**
4539 * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
4540 * @adapter: board private structure to initialize
4541 *
4542 * Cache the descriptor ring offsets for Flow Director to the assigned rings.
4543 *
4544 **/
Joe Perchese8e9f692010-09-07 21:34:53 +00004545static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00004546{
4547 int i;
4548 bool ret = false;
4549
4550 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
4551 ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4552 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
4553 for (i = 0; i < adapter->num_rx_queues; i++)
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004554 adapter->rx_ring[i]->reg_idx = i;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00004555 for (i = 0; i < adapter->num_tx_queues; i++)
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004556 adapter->tx_ring[i]->reg_idx = i;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00004557 ret = true;
4558 }
4559
4560 return ret;
4561}
4562
Yi Zou0331a832009-05-17 12:33:52 +00004563#ifdef IXGBE_FCOE
4564/**
4565 * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
4566 * @adapter: board private structure to initialize
4567 *
4568 * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
4569 *
4570 */
4571static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
4572{
Yi Zou0331a832009-05-17 12:33:52 +00004573 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
Alexander Duyckbf29ee62010-11-16 19:27:07 -08004574 int i;
4575 u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
Yi Zou0331a832009-05-17 12:33:52 +00004576
Alexander Duyckbf29ee62010-11-16 19:27:07 -08004577 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
4578 return false;
4579
Yi Zou0331a832009-05-17 12:33:52 +00004580#ifdef CONFIG_IXGBE_DCB
Alexander Duyckbf29ee62010-11-16 19:27:07 -08004581 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
4582 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
Yi Zou8de8b2e2009-09-03 14:55:50 +00004583
Alexander Duyckbf29ee62010-11-16 19:27:07 -08004584 ixgbe_cache_ring_dcb(adapter);
4585 /* find out queues in TC for FCoE */
4586 fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
4587 fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
4588 /*
4589 * In 82599, the number of Tx queues for each traffic
4590 * class for both 8-TC and 4-TC modes are:
4591 * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
4592 * 8 TCs: 32 32 16 16 8 8 8 8
4593 * 4 TCs: 64 64 32 32
4594 * We have max 8 queues for FCoE, where 8 is the
4595 * FCoE redirection table size. If TC for FCoE is
4596 * less than or equal to TC3, we have enough queues
4597 * to add max of 8 queues for FCoE, so we start FCoE
4598 * Tx queue from the next one, i.e., reg_idx + 1.
4599 * If TC for FCoE is above TC3, implying 8 TC mode,
4600 * and we need 8 for FCoE, we have to take all queues
4601 * in that traffic class for FCoE.
4602 */
4603 if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
4604 fcoe_tx_i--;
Yi Zou0331a832009-05-17 12:33:52 +00004605 }
Alexander Duyckbf29ee62010-11-16 19:27:07 -08004606#endif /* CONFIG_IXGBE_DCB */
4607 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
4608 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4609 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
4610 ixgbe_cache_ring_fdir(adapter);
4611 else
4612 ixgbe_cache_ring_rss(adapter);
4613
4614 fcoe_rx_i = f->mask;
4615 fcoe_tx_i = f->mask;
4616 }
4617 for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
4618 adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
4619 adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
4620 }
4621 return true;
Yi Zou0331a832009-05-17 12:33:52 +00004622}
4623
4624#endif /* IXGBE_FCOE */
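
/*
 * Illustrative sketch, not driver code: the fcoe_tx_i adjustment in
 * ixgbe_cache_ring_fcoe() above, worked for a hypothetical FCoE
 * traffic class. TC5 in 8-TC mode owns only 8 Tx queues (104..111),
 * exactly the FCRETA size, so the FCoE Tx rings must start at the TC
 * base instead of base + 1.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int fcreta_size = 8;		/* FCoE redirection table size */
	int tc = 5;			/* hypothetical FCoE traffic class */
	int tc_base = (tc + 8) << 3;	/* 8-TC Tx base: 104 for TC5 */
	int fcoe_tx_i = tc_base + 1;

	if (fcreta_size == 8 && tc > 3)	/* TC owns exactly 8 Tx queues */
		fcoe_tx_i--;
	printf("FCoE Tx rings start at descriptor queue %d\n", fcoe_tx_i);
	return 0;
}
#endif
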
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004625/**
Greg Rose1cdd1ec2010-01-09 02:26:46 +00004626 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
4627 * @adapter: board private structure to initialize
4628 *
4629 * SR-IOV doesn't use any descriptor rings but changes the default if
4630 * no other mapping is used.
4631 *
4632 */
4633static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
4634{
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004635 adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
4636 adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
Greg Rose1cdd1ec2010-01-09 02:26:46 +00004637 if (adapter->num_vfs)
4638 return true;
4639 else
4640 return false;
4641}
4642
4643/**
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004644 * ixgbe_cache_ring_register - Descriptor ring to register mapping
4645 * @adapter: board private structure to initialize
4646 *
4647 * Once we know the feature-set enabled for the device, we'll cache
4648 * the register offset the descriptor ring is assigned to.
4649 *
4650 * Note, the order the various feature calls is important. It must start with
4651 * the "most" features enabled at the same time, then trickle down to the
4652 * least amount of features turned on at once.
4653 **/
4654static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
4655{
4656 /* start with default case */
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004657 adapter->rx_ring[0]->reg_idx = 0;
4658 adapter->tx_ring[0]->reg_idx = 0;
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004659
Greg Rose1cdd1ec2010-01-09 02:26:46 +00004660 if (ixgbe_cache_ring_sriov(adapter))
4661 return;
4662
Yi Zou0331a832009-05-17 12:33:52 +00004663#ifdef IXGBE_FCOE
4664 if (ixgbe_cache_ring_fcoe(adapter))
4665 return;
4666
4667#endif /* IXGBE_FCOE */
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004668#ifdef CONFIG_IXGBE_DCB
4669 if (ixgbe_cache_ring_dcb(adapter))
4670 return;
4671
4672#endif
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00004673 if (ixgbe_cache_ring_fdir(adapter))
4674 return;
4675
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004676 if (ixgbe_cache_ring_rss(adapter))
4677 return;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004678}
4679
Auke Kok9a799d72007-09-15 14:07:45 -07004680/**
4681 * ixgbe_alloc_queues - Allocate memory for all rings
4682 * @adapter: board private structure to initialize
4683 *
4684 * We allocate one ring per queue at run-time since we don't know the
Jesse Brandeburg4df10462009-03-13 22:15:31 +00004685 * number of queues at compile-time. The rings are allocated on the
4686 * adapter's NUMA node when possible, falling back to any node.
Auke Kok9a799d72007-09-15 14:07:45 -07004687 **/
Alexander Duyck2f90b862008-11-20 20:52:10 -08004688static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
Auke Kok9a799d72007-09-15 14:07:45 -07004689{
Eric Dumazete2ddeba2010-11-16 19:27:18 -08004690 int rx = 0, tx = 0, nid = adapter->node;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004691
Eric Dumazete2ddeba2010-11-16 19:27:18 -08004692 if (nid < 0 || !node_online(nid))
4693 nid = first_online_node;
4694
4695 for (; tx < adapter->num_tx_queues; tx++) {
4696 struct ixgbe_ring *ring;
4697
4698 ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004699 if (!ring)
Eric Dumazete2ddeba2010-11-16 19:27:18 -08004700 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004701 if (!ring)
Eric Dumazete2ddeba2010-11-16 19:27:18 -08004702 goto err_allocation;
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004703 ring->count = adapter->tx_ring_count;
Eric Dumazete2ddeba2010-11-16 19:27:18 -08004704 ring->queue_index = tx;
4705 ring->numa_node = nid;
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004706 ring->dev = &adapter->pdev->dev;
Alexander Duyckfc77dc32010-11-16 19:26:51 -08004707 ring->netdev = adapter->netdev;
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004708
Eric Dumazete2ddeba2010-11-16 19:27:18 -08004709 adapter->tx_ring[tx] = ring;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004710 }
Jesse Brandeburgb9804972008-09-11 20:00:29 -07004711
Eric Dumazete2ddeba2010-11-16 19:27:18 -08004712 for (; rx < adapter->num_rx_queues; rx++) {
4713 struct ixgbe_ring *ring;
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004714
Eric Dumazete2ddeba2010-11-16 19:27:18 -08004715 ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004716 if (!ring)
Eric Dumazete2ddeba2010-11-16 19:27:18 -08004717 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004718 if (!ring)
Eric Dumazete2ddeba2010-11-16 19:27:18 -08004719 goto err_allocation;
4720 ring->count = adapter->rx_ring_count;
4721 ring->queue_index = rx;
4722 ring->numa_node = nid;
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004723 ring->dev = &adapter->pdev->dev;
Alexander Duyckfc77dc32010-11-16 19:26:51 -08004724 ring->netdev = adapter->netdev;
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004725
Eric Dumazete2ddeba2010-11-16 19:27:18 -08004726 adapter->rx_ring[rx] = ring;
Auke Kok9a799d72007-09-15 14:07:45 -07004727 }
4728
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004729 ixgbe_cache_ring_register(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07004730
4731 return 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004732
Eric Dumazete2ddeba2010-11-16 19:27:18 -08004733err_allocation:
4734 while (tx)
4735 kfree(adapter->tx_ring[--tx]);
4736
4737 while (rx)
4738 kfree(adapter->rx_ring[--rx]);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004739 return -ENOMEM;
4740}
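
/*
 * Illustrative sketch, not driver code: the allocate-on-node-with-
 * fallback-and-unwind pattern used by ixgbe_alloc_queues() above, in
 * plain C. alloc_on_node() is a hypothetical stand-in for
 * kzalloc_node(); when the preferred node fails we fall back to any
 * node, and on total failure everything allocated so far is released
 * in reverse, as in err_allocation.
 */
#if 0
#include <stdlib.h>

extern void *alloc_on_node(size_t size, int node);	/* may return NULL */

static void **alloc_array(int n, size_t size, int node)
{
	void **arr = calloc(n, sizeof(*arr));
	int i;

	if (!arr)
		return NULL;
	for (i = 0; i < n; i++) {
		arr[i] = alloc_on_node(size, node);
		if (!arr[i])
			arr[i] = malloc(size);	/* fall back to any node */
		if (!arr[i])
			goto unwind;
	}
	return arr;

unwind:
	while (i)
		free(arr[--i]);		/* release in reverse order */
	free(arr);
	return NULL;
}
#endif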
4741
4742/**
4743 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
4744 * @adapter: board private structure to initialize
4745 *
4746 * Attempt to configure the interrupts using the best available
4747 * capabilities of the hardware and the kernel.
4748 **/
Al Virofeea6a52008-11-27 15:34:07 -08004749static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004750{
PJ Waskiewicz8be0e462009-03-31 21:34:05 +00004751 struct ixgbe_hw *hw = &adapter->hw;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004752 int err = 0;
4753 int vector, v_budget;
4754
4755 /*
4756 * It's easy to be greedy for MSI-X vectors, but it really
4757 * doesn't do us much good if we have a lot more vectors
4758 * than CPU's. So let's be conservative and only ask for
PJ Waskiewicz342bde12009-11-12 23:50:43 +00004759 * (roughly) the same number of vectors as there are CPU's.
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004760 */
4761 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
Joe Perchese8e9f692010-09-07 21:34:53 +00004762 (int)num_online_cpus()) + NON_Q_VECTORS;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004763
4764 /*
4765 * At the same time, hardware can only support a maximum of
PJ Waskiewicz8be0e462009-03-31 21:34:05 +00004766 * hw.mac->max_msix_vectors vectors. With features
4767 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
4768 * descriptor queues supported by our device. Thus, we cap it off in
4769 * those rare cases where the cpu count also exceeds our vector limit.
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004770 */
PJ Waskiewicz8be0e462009-03-31 21:34:05 +00004771 v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004772
4773 /* A failure in MSI-X entry allocation isn't fatal, but it does
4774 * mean we disable MSI-X capabilities of the adapter. */
4775 adapter->msix_entries = kcalloc(v_budget,
Joe Perchese8e9f692010-09-07 21:34:53 +00004776 sizeof(struct msix_entry), GFP_KERNEL);
Alexander Duyck7a921c92009-05-06 10:43:28 +00004777 if (adapter->msix_entries) {
4778 for (vector = 0; vector < v_budget; vector++)
4779 adapter->msix_entries[vector].entry = vector;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004780
Alexander Duyck7a921c92009-05-06 10:43:28 +00004781 ixgbe_acquire_msix_vectors(adapter, v_budget);
4782
4783 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
4784 goto out;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004785 }
David S. Miller26d27842010-05-03 15:18:22 -07004786
Alexander Duyck7a921c92009-05-06 10:43:28 +00004787 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
4788 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00004789 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
4790 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
4791 adapter->atr_sample_rate = 0;
Greg Rose1cdd1ec2010-01-09 02:26:46 +00004792 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
4793 ixgbe_disable_sriov(adapter);
4794
Ben Hutchings847f53f2010-09-27 08:28:56 +00004795 err = ixgbe_set_num_queues(adapter);
4796 if (err)
4797 return err;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004798
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004799 err = pci_enable_msi(adapter->pdev);
4800 if (!err) {
4801 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
4802 } else {
Emil Tantilov849c4542010-06-03 16:53:41 +00004803 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
4804 "Unable to allocate MSI interrupt, "
4805 "falling back to legacy. Error: %d\n", err);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004806 /* reset err */
4807 err = 0;
4808 }
4809
4810out:
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004811 return err;
4812}
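
/*
 * Illustrative sketch, not driver code: the vector budget arithmetic
 * above, worked with hypothetical numbers. With 16 Rx and 16 Tx queues
 * on an 8-CPU box the request is min(32, 8) + 1 = 9 vectors (the
 * non-queue vector is modeled as 1 here), then capped at the hardware
 * maximum.
 */
#if 0
#include <stdio.h>

static int min_int(int a, int b)
{
	return a < b ? a : b;
}

int main(void)
{
	int rx = 16, tx = 16, cpus = 8;
	int non_q_vectors = 1;		/* link/other cause vector */
	int hw_max = 64;		/* mac.max_msix_vectors, for example */
	int v_budget;

	v_budget = min_int(rx + tx, cpus) + non_q_vectors;
	v_budget = min_int(v_budget, hw_max);
	printf("MSI-X vectors to request: %d\n", v_budget);
	return 0;
}
#endif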
4813
Alexander Duyck7a921c92009-05-06 10:43:28 +00004814/**
4815 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
4816 * @adapter: board private structure to initialize
4817 *
4818 * We allocate one q_vector per queue interrupt. If allocation fails we
4819 * return -ENOMEM.
4820 **/
4821static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
4822{
4823 int q_idx, num_q_vectors;
4824 struct ixgbe_q_vector *q_vector;
4825 int napi_vectors;
4826 int (*poll)(struct napi_struct *, int);
4827
4828 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4829 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
4830 napi_vectors = adapter->num_rx_queues;
Alexander Duyck91281fd2009-06-04 16:00:27 +00004831 poll = &ixgbe_clean_rxtx_many;
Alexander Duyck7a921c92009-05-06 10:43:28 +00004832 } else {
4833 num_q_vectors = 1;
4834 napi_vectors = 1;
4835 poll = &ixgbe_poll;
4836 }
4837
4838 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
Jesse Brandeburg1a6c14a2010-02-03 14:18:50 +00004839 q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
Joe Perchese8e9f692010-09-07 21:34:53 +00004840 GFP_KERNEL, adapter->node);
Jesse Brandeburg1a6c14a2010-02-03 14:18:50 +00004841 if (!q_vector)
4842 q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
Joe Perchese8e9f692010-09-07 21:34:53 +00004843 GFP_KERNEL);
Alexander Duyck7a921c92009-05-06 10:43:28 +00004844 if (!q_vector)
4845 goto err_out;
4846 q_vector->adapter = adapter;
Nelson, Shannonf7554a22009-09-18 09:46:06 +00004847 if (q_vector->txr_count && !q_vector->rxr_count)
4848 q_vector->eitr = adapter->tx_eitr_param;
4849 else
4850 q_vector->eitr = adapter->rx_eitr_param;
Alexander Duyckfe49f042009-06-04 16:00:09 +00004851 q_vector->v_idx = q_idx;
Alexander Duyck91281fd2009-06-04 16:00:27 +00004852 netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
Alexander Duyck7a921c92009-05-06 10:43:28 +00004853 adapter->q_vector[q_idx] = q_vector;
4854 }
4855
4856 return 0;
4857
4858err_out:
4859 while (q_idx) {
4860 q_idx--;
4861 q_vector = adapter->q_vector[q_idx];
4862 netif_napi_del(&q_vector->napi);
4863 kfree(q_vector);
4864 adapter->q_vector[q_idx] = NULL;
4865 }
4866 return -ENOMEM;
4867}

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_idx, num_q_vectors;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	else
		num_q_vectors = 1;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
		adapter->q_vector[q_idx] = NULL;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
}

static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	err = ixgbe_set_num_queues(adapter);
	if (err)
		return err;

	err = ixgbe_set_interrupt_capability(adapter);
	if (err) {
		e_dev_err("Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbe_alloc_queues(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_queues:
	ixgbe_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}

static void ring_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct ixgbe_ring, rcu));
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbe_ring *ring = adapter->rx_ring[i];

		/* ixgbe_get_stats64() might access this ring, we must wait
		 * a grace period before freeing it.
		 */
		call_rcu(&ring->rcu, ring_free_rcu);
		adapter->rx_ring[i] = NULL;
	}

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}
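
/*
 * Illustrative sketch (not part of the driver): the call_rcu() above
 * defers the kfree() until an RCU grace period has elapsed, so a
 * concurrent reader such as ixgbe_get_stats64() that walks the rings
 * under rcu_read_lock() can never dereference freed memory. The reader
 * side of that contract looks roughly like this, with a hypothetical
 * RCU-protected ring pointer:
 *
 *	rcu_read_lock();
 *	ring = rcu_dereference(adapter->rx_ring[i]);
 *	if (ring)
 *		packets = ring->stats.packets;
 *	rcu_read_unlock();
 */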

/**
 * ixgbe_sfp_timer - timer that schedules the worker thread to find a
 * missing SFP+ module
 * @data: pointer to our adapter struct
 **/
static void ixgbe_sfp_timer(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;

	/*
	 * Defer the actual detection to the sfp_task worker so it runs
	 * outside of interrupt context, due to the delays that sfp+
	 * detection requires
	 */
	schedule_work(&adapter->sfp_task);
}

/**
 * ixgbe_sfp_task - worker thread to find a missing module
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_sfp_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
						     struct ixgbe_adapter,
						     sfp_task);
	struct ixgbe_hw *hw = &adapter->hw;

	if ((hw->phy.type == ixgbe_phy_nl) &&
	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
		s32 ret = hw->phy.ops.identify_sfp(hw);
		if (ret == IXGBE_ERR_SFP_NOT_PRESENT)
			goto reschedule;
		ret = hw->phy.ops.reset(hw);
		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			e_dev_err("failed to initialize because an unsupported "
				  "SFP+ module type was detected.\n");
			e_dev_err("Reload the driver after installing a "
				  "supported module.\n");
			unregister_netdev(adapter->netdev);
		} else {
			e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
		}
		/* don't need this routine any more */
		clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	}
	return;
reschedule:
	if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
		mod_timer(&adapter->sfp_timer,
			  round_jiffies(jiffies + (2 * HZ)));
}
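
/*
 * Illustrative sketch (not part of the driver): the timer/work pair above
 * is the usual way to poll slow hardware. A timer callback runs in
 * softirq context where sleeping is forbidden, so it only queues a work
 * item; the work item may then msleep() or poke the PHY at leisure.
 * A minimal version of the pattern, assuming a hypothetical struct dev
 * with a timer, a work item, and a probe_hardware() helper:
 *
 *	static void dev_timer_cb(unsigned long data)
 *	{
 *		struct dev *d = (struct dev *)data;
 *
 *		schedule_work(&d->slow_task);		// cannot sleep here
 *	}
 *
 *	static void dev_slow_task(struct work_struct *work)
 *	{
 *		struct dev *d = container_of(work, struct dev, slow_task);
 *
 *		msleep(100);				// sleeping is fine here
 *		if (!probe_hardware(d))
 *			mod_timer(&d->timer,
 *				  round_jiffies(jiffies + 2 * HZ));
 *	}
 */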

/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbe_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *dev = adapter->netdev;
	unsigned int rss;
#ifdef CONFIG_IXGBE_DCB
	int j;
	struct tc_configuration *tc;
#endif
	int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Set capability flags */
	rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
	adapter->ring_feature[RING_F_RSS].indices = rss;
	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
	adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (hw->device_id == IXGBE_DEV_ID_82598AT)
			adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
		adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
		adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
		if (dev->features & NETIF_F_NTUPLE) {
			/* Flow Director perfect filter enabled */
			adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
			adapter->atr_sample_rate = 0;
			spin_lock_init(&adapter->fdir_perfect_lock);
		} else {
			/* Flow Director hash filters enabled */
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
			adapter->atr_sample_rate = 20;
		}
		adapter->ring_feature[RING_F_FDIR].indices =
							 IXGBE_MAX_FDIR_INDICES;
		adapter->fdir_pballoc = 0;
#ifdef IXGBE_FCOE
		adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
		adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
		adapter->ring_feature[RING_F_FCOE].indices = 0;
#ifdef CONFIG_IXGBE_DCB
		/* Default traffic class to use for FCoE */
		adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
		adapter->fcoe.up = IXGBE_FCOE_DEFTC;
#endif
#endif /* IXGBE_FCOE */
		break;
	default:
		break;
	}

#ifdef CONFIG_IXGBE_DCB
	/* Configure DCB traffic classes */
	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
		tc = &adapter->dcb_cfg.tc_config[j];
		tc->path[DCB_TX_CONFIG].bwg_id = 0;
		tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->path[DCB_RX_CONFIG].bwg_id = 0;
		tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->dcb_pfc = pfc_disabled;
	}
	adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
	adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
	adapter->dcb_cfg.rx_pba_cfg = pba_equal;
	adapter->dcb_cfg.pfc_mode_enable = false;
	adapter->dcb_cfg.round_robin_enable = false;
	adapter->dcb_set_bitmap = 0x00;
	ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
			   adapter->ring_feature[RING_F_DCB].indices);

#endif

	/* default flow control settings */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.current_mode = ixgbe_fc_full;	/* init for ethtool output */
#ifdef CONFIG_DCB
	adapter->last_lfc_mode = hw->fc.current_mode;
#endif
	hw->fc.high_water = FC_HIGH_WATER(max_frame);
	hw->fc.low_water = FC_LOW_WATER(max_frame);
	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
	hw->fc.send_xon = true;
	hw->fc.disable_fc_autoneg = false;

	/* enable itr by default in dynamic mode */
	adapter->rx_itr_setting = 1;
	adapter->rx_eitr_param = 20000;
	adapter->tx_itr_setting = 1;
	adapter->tx_eitr_param = 10000;

	/* set defaults for eitr in MegaBytes */
	adapter->eitr_low = 10;
	adapter->eitr_high = 20;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBE_DEFAULT_RXD;

	/* initialize eeprom parameters */
	if (ixgbe_init_eeprom_params_generic(hw)) {
		e_dev_err("EEPROM initialization failed\n");
		return -EIO;
	}

	/* enable rx csum by default */
	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;

	/* get assigned NUMA node */
	adapter->node = dev_to_node(&pdev->dev);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;
}
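
/*
 * Worked example (illustrative): for the default 1500-byte MTU the frame
 * size used to scale the flow control thresholds above is
 *
 *	max_frame = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518 bytes
 *
 * so the FC_HIGH_WATER()/FC_LOW_WATER() macros derive the XOFF/XON
 * watermarks from the largest frame the link can carry. The exact macro
 * arithmetic lives in the driver headers and is not reproduced here.
 */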

/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int size;

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
	if (!tx_ring->tx_buffer_info)
		tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->work_limit = tx_ring->count;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
	return -ENOMEM;
}
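
/*
 * Worked example (illustrative, assuming a 16-byte advanced Tx descriptor
 * and a typical 512-entry ring): the descriptor region needs
 * 512 * 16 = 8192 bytes, which ALIGN(, 4096) leaves unchanged, while a
 * 100-entry ring (1600 bytes) would be padded up to one 4 KiB page.
 * Rounding to 4 KiB keeps the DMA region page-aligned for the hardware.
 */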

/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
		if (!err)
			continue;
		e_err(probe, "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}
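
/*
 * Illustrative sketch (not part of the driver): because a failure here can
 * leave earlier rings allocated, callers pair this function with the
 * matching free-all routine, as ixgbe_open() does further below:
 *
 *	err = ixgbe_setup_all_tx_resources(adapter);
 *	if (err)
 *		goto err_setup_tx;
 *	...
 * err_setup_tx:
 *	ixgbe_free_all_tx_resources(adapter);	// safe on partial setup
 */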

/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int size;

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
	if (!rx_ring->rx_buffer_info)
		rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
		if (!err)
			continue;
		e_err(probe, "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
{
	ixgbe_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i]->desc)
			ixgbe_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
{
	ixgbe_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i]->desc)
			ixgbe_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
		return -EINVAL;

	e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	hw->fc.high_water = FC_HIGH_WATER(max_frame);
	hw->fc.low_water = FC_LOW_WATER(max_frame);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
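
/*
 * Worked example (illustrative): requesting a 9000-byte jumbo MTU gives
 * max_frame = 9000 + 14 + 4 = 9018 bytes, which is accepted as long as
 * it does not exceed IXGBE_MAX_JUMBO_FRAME_SIZE; a request of 40 would
 * be rejected with -EINVAL because IPv4 requires a minimum MTU of 68.
 */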

/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbe_open(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err;

	/* disallow open during test */
	if (test_bit(__IXGBE_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = ixgbe_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbe_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbe_configure(adapter);

	err = ixgbe_request_irq(adapter);
	if (err)
		goto err_req_irq;

	err = ixgbe_up_complete(adapter);
	if (err)
		goto err_up;

	netif_tx_start_all_queues(netdev);

	return 0;

err_up:
	ixgbe_release_hw_control(adapter);
	ixgbe_free_irq(adapter);
err_req_irq:
err_setup_rx:
	ixgbe_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbe_free_all_tx_resources(adapter);
	ixgbe_reset(adapter);

	return err;
}

/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbe_close(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ixgbe_down(adapter);
	ixgbe_free_irq(adapter);

	ixgbe_free_all_tx_resources(adapter);
	ixgbe_free_all_rx_resources(adapter);

	ixgbe_release_hw_control(adapter);

	return 0;
}

#ifdef CONFIG_PM
static int ixgbe_resume(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/*
	 * pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		e_dev_err("Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_wake_from_d3(pdev, false);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err) {
		e_dev_err("Cannot initialize interrupts for device\n");
		return err;
	}

	ixgbe_reset(adapter);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);

	if (netif_running(netdev)) {
		err = ixgbe_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif /* CONFIG_PM */

static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ctrl, fctrl;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		ixgbe_down(adapter);
		ixgbe_free_irq(adapter);
		ixgbe_free_all_tx_resources(adapter);
		ixgbe_free_all_rx_resources(adapter);
	}

	ixgbe_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
	if (wufc) {
		ixgbe_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & IXGBE_WUFC_MC) {
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl |= IXGBE_FCTRL_MPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}

		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		ctrl |= IXGBE_CTRL_GIO_DIS;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);

		IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
	}

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		pci_wake_from_d3(pdev, false);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		pci_wake_from_d3(pdev, !!wufc);
		break;
	default:
		break;
	}

	*enable_wake = !!wufc;

	ixgbe_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __ixgbe_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
#endif /* CONFIG_PM */

static void ixgbe_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__ixgbe_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	u64 total_mpc = 0;
	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
	u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
	u64 bytes = 0, packets = 0;

	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		u64 rsc_count = 0;
		u64 rsc_flush = 0;
		for (i = 0; i < 16; i++)
			adapter->hw_rx_no_dma_resources +=
				IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		for (i = 0; i < adapter->num_rx_queues; i++) {
			rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
			rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
		}
		adapter->rsc_total_count = rsc_count;
		adapter->rsc_total_flush = rsc_flush;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
		non_eop_descs += rx_ring->rx_stats.non_eop_descs;
		alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
		bytes += rx_ring->stats.bytes;
		packets += rx_ring->stats.packets;
	}
	adapter->non_eop_descs = non_eop_descs;
	adapter->alloc_rx_page_failed = alloc_rx_page_failed;
	adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
	netdev->stats.rx_bytes = bytes;
	netdev->stats.rx_packets = packets;

	bytes = 0;
	packets = 0;
	/* gather some stats to the adapter struct that are per queue */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
		restart_queue += tx_ring->tx_stats.restart_queue;
		tx_busy += tx_ring->tx_stats.tx_busy;
		bytes += tx_ring->stats.bytes;
		packets += tx_ring->stats.packets;
	}
	adapter->restart_queue = restart_queue;
	adapter->tx_busy = tx_busy;
	netdev->stats.tx_bytes = bytes;
	netdev->stats.tx_packets = packets;

	hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	for (i = 0; i < 8; i++) {
		/* for packet buffers not used, the register should read 0 */
		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		missed_rx += mpc;
		hwstats->mpc[i] += mpc;
		total_mpc += hwstats->mpc[i];
		if (hw->mac.type == ixgbe_mac_82598EB)
			hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
		hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			hwstats->pxonrxc[i] +=
				IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			hwstats->pxonrxc[i] +=
				IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			break;
		default:
			break;
		}
		hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
	}
	hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	/* work around hardware counting issue */
	hwstats->gprc -= missed_rx;

	ixgbe_update_xoff_received(adapter);

	/* 82598 hardware only has a 32 bit counter in the high register */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
		IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
		IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
		IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
		hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
#ifdef IXGBE_FCOE
		hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
		hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
#endif /* IXGBE_FCOE */
		break;
	default:
		break;
	}
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	hwstats->bprc += bprc;
	hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		hwstats->mprc -= bprc;
	hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
	hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	hwstats->lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	hwstats->lxofftxc += lxoff;
	hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	/*
	 * 82598 errata - tx of flow control packets is included in tx counters
	 */
	xon_off_tot = lxon + lxoff;
	hwstats->gptc -= xon_off_tot;
	hwstats->mptc -= xon_off_tot;
	hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
	hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	hwstats->ptc64 -= xon_off_tot;
	hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = hwstats->mprc;

	/* Rx Errors */
	netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
	netdev->stats.rx_dropped = 0;
	netdev->stats.rx_length_errors = hwstats->rlec;
	netdev->stats.rx_crc_errors = hwstats->crcerrs;
	netdev->stats.rx_missed_errors = total_mpc;
}
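
/*
 * Illustrative note (not driver code): the hardware statistics registers
 * read here are generally clear-on-read, which is why the function above
 * accumulates each read into a running 64-bit software total instead of
 * assigning it. For the wide byte counters on 82599/X540 the low half is
 * accumulated and the high half is read only to clear the latch, as the
 * inline comments indicate:
 *
 *	hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);  // low 32 bits
 *	IXGBE_READ_REG(hw, IXGBE_GORCH);                   // read to clear
 */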

/**
 * ixgbe_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbe_watchdog(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;
	u64 eics = 0;
	int i;

	/*
	 * Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires
	 */

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		goto watchdog_short_circuit;

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/*
		 * for legacy and MSI interrupts don't set any bits
		 * that are enabled for EIAM, because this operation
		 * would set *both* EIMS and EICS for any bit in EIAM
		 */
		IXGBE_WRITE_REG(hw, IXGBE_EICS,
				(IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
		goto watchdog_reschedule;
	}

	/* get one bit for every active tx/rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbe_q_vector *qv = adapter->q_vector[i];
		if (qv->rxr_count || qv->txr_count)
			eics |= ((u64)1 << i);
	}

	/* Cause software interrupt to ensure rx rings are cleaned */
	ixgbe_irq_rearm_queues(adapter, eics);

watchdog_reschedule:
	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));

watchdog_short_circuit:
	schedule_work(&adapter->watchdog_task);
}
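
/*
 * Worked example (illustrative): if queue vectors 0, 1 and 3 carry rings
 * while vector 2 is unused, the loop above builds
 *
 *	eics = (1 << 0) | (1 << 1) | (1 << 3) = 0b1011 = 0xB
 *
 * so the subsequent EICS write raises a software interrupt only on the
 * vectors that actually have Tx/Rx work to clean.
 */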

/**
 * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_multispeed_fiber_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
						     struct ixgbe_adapter,
						     multispeed_fiber_task);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg;
	bool negotiation;

	adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
	hw->mac.autotry_restart = false;
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK;
}

/**
 * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_sfp_config_module_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
						     struct ixgbe_adapter,
						     sfp_config_module_task);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 err;

	adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;

	/* Time for electrical oscillations to settle down */
	msleep(100);
	err = hw->phy.ops.identify_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		e_dev_err("failed to initialize because an unsupported SFP+ "
			  "module type was detected.\n");
		e_dev_err("Reload the driver after installing a supported "
			  "module.\n");
		unregister_netdev(adapter->netdev);
		return;
	}
	hw->mac.ops.setup_sfp(hw);

	if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
		/* This will also work for DA Twinax connections */
		schedule_work(&adapter->multispeed_fiber_task);
	adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK;
}

/**
 * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_fdir_reinit_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
						     struct ixgbe_adapter,
						     fdir_reinit_task);
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_bit(__IXGBE_TX_FDIR_INIT_DONE,
				&(adapter->tx_ring[i]->state));
	} else {
		e_err(probe, "failed to finish FDIR re-initialization, "
		      "ignored adding FDIR ATR filters\n");
	}
	/* Done FDIR Re-initialization, enable transmits */
	netif_tx_start_all_queues(adapter->netdev);
}

static DEFINE_MUTEX(ixgbe_watchdog_lock);

/**
 * ixgbe_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_watchdog_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
						     struct ixgbe_adapter,
						     watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed;
	bool link_up;
	int i;
	struct ixgbe_ring *tx_ring;
	int some_tx_pending = 0;

	mutex_lock(&ixgbe_watchdog_lock);

	link_up = adapter->link_up;
	link_speed = adapter->link_speed;

	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
		if (link_up) {
#ifdef CONFIG_DCB
			if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
				for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
					hw->mac.ops.fc_enable(hw, i);
			} else {
				hw->mac.ops.fc_enable(hw, 0);
			}
#else
			hw->mac.ops.fc_enable(hw, 0);
#endif
		}

		if (link_up ||
		    time_after(jiffies, (adapter->link_check_timeout +
					 IXGBE_TRY_LINK_TIMEOUT))) {
			adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
			IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
		}
		adapter->link_up = link_up;
		adapter->link_speed = link_speed;
	}

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			bool flow_rx, flow_tx;

			switch (hw->mac.type) {
			case ixgbe_mac_82598EB: {
				u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
				u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
				flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
				flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
			}
				break;
			case ixgbe_mac_82599EB:
			case ixgbe_mac_X540: {
				u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
				u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
				flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
				flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
			}
				break;
			default:
				flow_tx = false;
				flow_rx = false;
				break;
			}

			e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
			       (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
				"10 Gbps" :
				(link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
				 "1 Gbps" : "unknown speed")),
			       ((flow_rx && flow_tx) ? "RX/TX" :
				(flow_rx ? "RX" :
				 (flow_tx ? "TX" : "None"))));

			netif_carrier_on(netdev);
		} else {
			/* Force detection of hung controller */
			for (i = 0; i < adapter->num_tx_queues; i++) {
				tx_ring = adapter->tx_ring[i];
				set_check_for_tx_hang(tx_ring);
			}
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			e_info(drv, "NIC Link is Down\n");
			netif_carrier_off(netdev);
		}
	}

	if (!netif_carrier_ok(netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			tx_ring = adapter->tx_ring[i];
			if (tx_ring->next_to_use != tx_ring->next_to_clean) {
				some_tx_pending = 1;
				break;
			}
		}

		if (some_tx_pending) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			schedule_work(&adapter->reset_task);
		}
	}

	ixgbe_update_stats(adapter);
	mutex_unlock(&ixgbe_watchdog_lock);
}
6083
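/**
 * ixgbe_tso - set up a TSO (advanced context) descriptor
 * @adapter: board private structure
 * @tx_ring: ring on which the packet will be sent
 * @skb: packet to segment
 * @tx_flags: flags accumulated for this transmit
 * @hdr_len: updated with the total header length covered by TSO
 * @protocol: Ethernet protocol of the packet
 *
 * Returns a negative error if expanding a cloned header fails, true if
 * a TSO context descriptor was queued, or false if the packet needs no
 * segmentation.
 **/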
static int ixgbe_tso(struct ixgbe_adapter *adapter,
		     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, u8 *hdr_len, __be16 protocol)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
	u32 mss_l4len_idx, l4len;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}
		l4len = tcp_hdrlen(skb);
		*hdr_len += l4len;

		if (protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (skb_is_gso_v6(skb)) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
		}

		i = tx_ring->next_to_use;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);

		/* VLAN MACLEN IPLEN */
		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= ((skb_network_offset(skb)) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		*hdr_len += skb_network_offset(skb);
		vlan_macip_lens |=
		    (skb_transport_header(skb) - skb_network_header(skb));
		*hdr_len +=
		    (skb_transport_header(skb) - skb_network_header(skb));
		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
				   IXGBE_ADVTXD_DTYP_CTXT);

		if (protocol == htons(ETH_P_IP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);

		/* MSS L4LEN IDX */
		mss_l4len_idx =
		    (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
		/* use index 1 for TSO */
		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}

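/**
 * ixgbe_psum - compute TUCMD flags for a partial-checksum packet
 * @adapter: board private structure
 * @skb: packet for which checksum offload is requested
 * @protocol: Ethernet protocol of the packet
 *
 * Returns the IXGBE_ADVTXD_TUCMD_* bits describing the IP version and
 * L4 protocol (TCP or SCTP) so the hardware can insert the checksum.
 **/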
static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
		      __be16 protocol)
{
	u32 rtn = 0;

	switch (protocol) {
	case cpu_to_be16(ETH_P_IP):
		rtn |= IXGBE_ADVTXD_TUCMD_IPV4;
		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_TCP:
			rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			break;
		case IPPROTO_SCTP:
			rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		break;
	case cpu_to_be16(ETH_P_IPV6):
		/* XXX what about other V6 headers?? */
		switch (ipv6_hdr(skb)->nexthdr) {
		case IPPROTO_TCP:
			rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			break;
		case IPPROTO_SCTP:
			rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		break;
	default:
		if (unlikely(net_ratelimit()))
			e_warn(probe, "partial checksum but proto=%x!\n",
			       protocol);
		break;
	}

	return rtn;
}

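/**
 * ixgbe_tx_csum - queue a checksum-offload context descriptor
 * @adapter: board private structure
 * @tx_ring: ring on which the packet will be sent
 * @skb: packet needing checksum insertion
 * @tx_flags: flags accumulated for this transmit
 * @protocol: Ethernet protocol of the packet
 *
 * Returns true if a context descriptor was queued because checksum
 * offload or VLAN insertion is needed, false otherwise.
 **/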
static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
			  struct ixgbe_ring *tx_ring,
			  struct sk_buff *skb, u32 tx_flags,
			  __be16 protocol)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);

		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= (skb_network_offset(skb) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			vlan_macip_lens |= (skb_transport_header(skb) -
					    skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);

		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
		/* use index zero for tx checksum offload */
		context_desc->mss_l4len_idx = 0;

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}

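/**
 * ixgbe_tx_map - map the skb head and fragments for DMA
 * @adapter: board private structure
 * @tx_ring: ring on which the packet will be sent
 * @skb: packet to map
 * @tx_flags: flags accumulated for this transmit
 * @first: index of the first descriptor used by this packet
 * @hdr_len: header length, used to account TSO/FSO segment bytes
 *
 * Returns the number of buffers mapped, or 0 if a DMA mapping failed,
 * in which case all mappings made so far are unwound.
 **/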
static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
			struct ixgbe_ring *tx_ring,
			struct sk_buff *skb, u32 tx_flags,
			unsigned int first, const u8 hdr_len)
{
	struct device *dev = tx_ring->dev;
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int len;
	unsigned int total = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;
	unsigned int bytecount = skb->len;
	u16 gso_segs = 1;

	i = tx_ring->next_to_use;

	if (tx_flags & IXGBE_TX_FLAGS_FCOE)
		/* excluding fcoe_crc_eof for FCoE */
		total -= sizeof(struct fcoe_crc_eof);

	len = min(skb_headlen(skb), total);
	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->mapped_as_page = false;
		tx_buffer_info->dma = dma_map_single(dev,
						     skb->data + offset,
						     size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buffer_info->dma))
			goto dma_error;
		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		total -= size;
		offset += size;
		count++;

		if (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = min((unsigned int)frag->size, total);
		offset = frag->page_offset;

		while (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;

			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma = dma_map_page(dev,
							   frag->page,
							   offset, size,
							   DMA_TO_DEVICE);
			tx_buffer_info->mapped_as_page = true;
			if (dma_mapping_error(dev, tx_buffer_info->dma))
				goto dma_error;
			tx_buffer_info->time_stamp = jiffies;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			total -= size;
			offset += size;
			count++;
		}
		if (total == 0)
			break;
	}

	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		gso_segs = skb_shinfo(skb)->gso_segs;
#ifdef IXGBE_FCOE
	/* adjust for FCoE Sequence Offload */
	else if (tx_flags & IXGBE_TX_FLAGS_FSO)
		gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
					skb_shinfo(skb)->gso_size);
#endif /* IXGBE_FCOE */
	bytecount += (gso_segs - 1) * hdr_len;

	/* multiply data chunks by size of headers */
	tx_ring->tx_buffer_info[i].bytecount = bytecount;
	tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	e_dev_err("TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed tx_buffer_info map */
	tx_buffer_info->dma = 0;
	tx_buffer_info->time_stamp = 0;
	tx_buffer_info->next_to_watch = 0;
	if (count)
		count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	return 0;
}

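/**
 * ixgbe_tx_queue - write the prepared descriptors and bump the tail
 * @tx_ring: ring on which the packet will be sent
 * @tx_flags: flags accumulated for this transmit
 * @count: number of buffers mapped by ixgbe_tx_map()
 * @paylen: total length of the packet
 * @hdr_len: header length excluded from the payload length field
 **/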
static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
			   int tx_flags, int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;
	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
				 IXGBE_ADVTXD_POPTS_SHIFT;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
					 IXGBE_ADVTXD_POPTS_SHIFT;

	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
				 IXGBE_ADVTXD_POPTS_SHIFT;

	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
		olinfo_status |= IXGBE_ADVTXD_CC;
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_FSO)
			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
	}

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, tx_ring->tail);
}

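/**
 * ixgbe_atr - add a Flow Director signature filter for this flow
 * @adapter: board private structure
 * @skb: transmitted packet whose flow is being sampled
 * @queue: Tx queue the packet was sent on
 * @tx_flags: flags accumulated for this transmit
 * @protocol: Ethernet protocol of the packet
 *
 * Samples outgoing IPv4/TCP flows so that receive traffic for the same
 * flow is steered back to the queue paired with this Tx queue.
 **/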
static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
		      u8 queue, u32 tx_flags, __be16 protocol)
{
	struct ixgbe_atr_input atr_input;
	struct iphdr *iph = ip_hdr(skb);
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	struct tcphdr *th;
	u16 vlan_id;

	/* Right now, we support IPv4 w/ TCP only */
	if (protocol != htons(ETH_P_IP) ||
	    iph->protocol != IPPROTO_TCP)
		return;

	memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));

	vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
		  IXGBE_TX_FLAGS_VLAN_SHIFT;

	th = tcp_hdr(skb);

	ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
	ixgbe_atr_set_src_port_82599(&atr_input, th->dest);
	ixgbe_atr_set_dst_port_82599(&atr_input, th->source);
	ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto);
	ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP);
	/* src and dst are inverted, think how the receiver sees them */
	ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr);
	ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr);

	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
	ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
}

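/**
 * __ixgbe_maybe_stop_tx - stop the queue, then re-check for free descriptors
 * @tx_ring: ring to check
 * @size: number of descriptors needed
 *
 * Returns -EBUSY if the ring really is full, or 0 if another CPU freed
 * enough descriptors while the queue was being stopped.
 **/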
static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 * smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbe_maybe_stop_tx(tx_ring, size);
}

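/**
 * ixgbe_select_queue - pick the Tx queue for a packet
 * @dev: network interface device structure
 * @skb: packet being transmitted
 *
 * FCoE/FIP traffic is pinned to the FCoE queue range (or the DCB
 * user priority queue), Flow Director hashing keeps a flow on the
 * CPU-local queue, and DCB maps the packet priority to a traffic
 * class queue; everything else falls back to skb_tx_hash().
 **/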
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int txq = smp_processor_id();
#ifdef IXGBE_FCOE
	__be16 protocol;

	protocol = vlan_get_protocol(skb);

	if ((protocol == htons(ETH_P_FCOE)) ||
	    (protocol == htons(ETH_P_FIP))) {
		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
			txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
			txq += adapter->ring_feature[RING_F_FCOE].mask;
			return txq;
#ifdef CONFIG_IXGBE_DCB
		} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			txq = adapter->fcoe.up;
			return txq;
#endif
		}
	}
#endif

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;
		return txq;
	}

	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
		if (skb->priority == TC_PRIO_CONTROL)
			txq = adapter->ring_feature[RING_F_DCB].indices - 1;
		else
			txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK)
			       >> 13;
		return txq;
	}

	return skb_tx_hash(dev, skb);
}

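/**
 * ixgbe_xmit_frame_ring - prepare and queue a packet on a specific ring
 * @skb: packet to transmit
 * @adapter: board private structure
 * @tx_ring: ring on which to send the packet
 *
 * Builds tx_flags (VLAN, DCB priority, FCoE), queues any needed context
 * descriptor (TSO, FSO or checksum offload), maps the buffers for DMA
 * and hands the descriptors to hardware.
 **/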
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
				  struct ixgbe_adapter *adapter,
				  struct ixgbe_ring *tx_ring)
{
	struct net_device *netdev = tx_ring->netdev;
	struct netdev_queue *txq;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int tso;
	int count = 0;
	unsigned int f;
	__be16 protocol;

	protocol = vlan_get_protocol(skb);

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
			tx_flags |= ((skb->queue_mapping & 0x7) << 13);
		}
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED &&
		   skb->priority != TC_PRIO_CONTROL) {
		tx_flags |= ((skb->queue_mapping & 0x7) << 13);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

#ifdef IXGBE_FCOE
	/* for FCoE with DCB, we force the priority to what
	 * was specified by the switch */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
	    (protocol == htons(ETH_P_FCOE) ||
	     protocol == htons(ETH_P_FIP))) {
#ifdef CONFIG_IXGBE_DCB
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
				      << IXGBE_TX_FLAGS_VLAN_SHIFT);
			tx_flags |= ((adapter->fcoe.up << 13)
				     << IXGBE_TX_FLAGS_VLAN_SHIFT);
		}
#endif
		/* flag for FCoE offloads */
		if (protocol == htons(ETH_P_FCOE))
			tx_flags |= IXGBE_TX_FLAGS_FCOE;
	}
#endif

	/* four things can cause us to need a context descriptor */
	if (skb_is_gso(skb) ||
	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
	    (tx_flags & IXGBE_TX_FLAGS_FCOE))
		count++;

	count += TXD_USE_COUNT(skb_headlen(skb));
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (ixgbe_maybe_stop_tx(tx_ring, count)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	first = tx_ring->next_to_use;
	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
#ifdef IXGBE_FCOE
		/* setup tx offload for FCoE */
		tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		if (tso)
			tx_flags |= IXGBE_TX_FLAGS_FSO;
#endif /* IXGBE_FCOE */
	} else {
		if (protocol == htons(ETH_P_IP))
			tx_flags |= IXGBE_TX_FLAGS_IPV4;
		tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
				protocol);
		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (tso)
			tx_flags |= IXGBE_TX_FLAGS_TSO;
		else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
				       protocol) &&
			 (skb->ip_summed == CHECKSUM_PARTIAL))
			tx_flags |= IXGBE_TX_FLAGS_CSUM;
	}

	count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
	if (count) {
		/* add the ATR filter if ATR is on */
		if (tx_ring->atr_sample_rate) {
			++tx_ring->atr_count;
			if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
			    test_bit(__IXGBE_TX_FDIR_INIT_DONE,
				     &tx_ring->state)) {
				ixgbe_atr(adapter, skb, tx_ring->queue_index,
					  tx_flags, protocol);
				tx_ring->atr_count = 0;
			}
		}
		txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
		txq->tx_bytes += skb->len;
		txq->tx_packets++;
		ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
		ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);

	} else {
		dev_kfree_skb_any(skb);
		tx_ring->tx_buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}

static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring;

	tx_ring = adapter->tx_ring[skb->queue_mapping];
	return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
}

/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
			    IXGBE_RAH_AV);

	return 0;
}

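/*
 * MDIO read/write callbacks used by the mdio45 ioctl support; both
 * forward to the PHY register accessors for this device's own PRTAD.
 */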
static int
ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 value;
	int rc;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
	if (!rc)
		rc = value;
	return rc;
}

static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	return hw->phy.ops.write_reg(hw, addr, devad, value);
}

static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
}

/**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_add_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	if (is_valid_ether_addr(mac->san_addr)) {
		rtnl_lock();
		err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	return err;
}

/**
 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_del_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	if (is_valid_ether_addr(mac->san_addr)) {
		rtnl_lock();
		err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ixgbe_netpoll(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;

	/* if interface is down do nothing */
	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return;

	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
		for (i = 0; i < num_q_vectors; i++) {
			struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
			ixgbe_msix_clean_many(0, q_vector);
		}
	} else {
		ixgbe_intr(adapter->pdev->irq, netdev);
	}
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
}
#endif

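/**
 * ixgbe_get_stats64 - fold per-ring statistics into rtnl_link_stats64
 * @netdev: network interface device structure
 * @stats: storage for the folded statistics
 *
 * Rx byte/packet counts are read per ring under a u64_stats retry
 * loop, Tx counts come from the per-txq stats, and the error counters
 * are whatever ixgbe_watchdog_task() last wrote to netdev->stats.
 **/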
static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;

	/* accurate rx/tx bytes/packets stats */
	dev_txq_stats_fold(netdev, stats);
	rcu_read_lock();
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
		u64 bytes, packets;
		unsigned int start;

		if (ring) {
			do {
				start = u64_stats_fetch_begin_bh(&ring->syncp);
				packets = ring->stats.packets;
				bytes = ring->stats.bytes;
			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
			stats->rx_packets += packets;
			stats->rx_bytes += bytes;
		}
	}
	rcu_read_unlock();
	/* following stats updated by ixgbe_watchdog_task() */
	stats->multicast = netdev->stats.multicast;
	stats->rx_errors = netdev->stats.rx_errors;
	stats->rx_length_errors = netdev->stats.rx_length_errors;
	stats->rx_crc_errors = netdev->stats.rx_crc_errors;
	stats->rx_missed_errors = netdev->stats.rx_missed_errors;
	return stats;
}

static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open = ixgbe_open,
	.ndo_stop = ixgbe_close,
	.ndo_start_xmit = ixgbe_xmit_frame,
	.ndo_select_queue = ixgbe_select_queue,
	.ndo_set_rx_mode = ixgbe_set_rx_mode,
	.ndo_set_multicast_list = ixgbe_set_rx_mode,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = ixgbe_set_mac,
	.ndo_change_mtu = ixgbe_change_mtu,
	.ndo_tx_timeout = ixgbe_tx_timeout,
	.ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
	.ndo_do_ioctl = ixgbe_ioctl,
	.ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
	.ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
	.ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
	.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
	.ndo_get_stats64 = ixgbe_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = ixgbe_netpoll,
#endif
#ifdef IXGBE_FCOE
	.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
	.ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
	.ndo_fcoe_enable = ixgbe_fcoe_enable,
	.ndo_fcoe_disable = ixgbe_fcoe_disable,
	.ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
#endif /* IXGBE_FCOE */
};

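/**
 * ixgbe_probe_vf - enable SR-IOV and allocate per-VF state
 * @adapter: board private structure
 * @ii: board specific info for this MAC type
 *
 * Enables up to 63 VFs (keeping resources for the PF itself), sets up
 * the PF-VF mailbox and disables RSC, which is not used in SR-IOV mode.
 * On any failure SR-IOV is left disabled.
 **/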
static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
				     const struct ixgbe_info *ii)
{
#ifdef CONFIG_PCI_IOV
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs)
		return;

	/* The 82599 supports up to 64 VFs per physical function
	 * but this implementation limits allocation to 63 so that
	 * basic networking resources are still available to the
	 * physical function
	 */
	adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
	err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
	if (err) {
		e_err(probe, "Failed to enable PCI sriov: %d\n", err);
		goto err_novfs;
	}
	/* If call to enable VFs succeeded then allocate memory
	 * for per VF control structures.
	 */
	adapter->vfinfo =
		kcalloc(adapter->num_vfs,
			sizeof(struct vf_data_storage), GFP_KERNEL);
	if (adapter->vfinfo) {
		/* Now that we're sure SR-IOV is enabled
		 * and memory allocated set up the mailbox parameters
		 */
		ixgbe_init_mbx_params_pf(hw);
		memcpy(&hw->mbx.ops, ii->mbx_ops,
		       sizeof(hw->mbx.ops));

		/* Disable RSC when in SR-IOV mode */
		adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
				     IXGBE_FLAG2_RSC_ENABLED);
		return;
	}

	/* Oh oh */
	e_err(probe, "Unable to allocate memory for VF Data Storage - "
	      "SRIOV disabled\n");
	pci_disable_sriov(adapter->pdev);

err_novfs:
	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
	adapter->num_vfs = 0;
#endif /* CONFIG_PCI_IOV */
}

Jesse Brandeburgc44ade92008-09-11 19:59:59 -07006935/**
Auke Kok9a799d72007-09-15 14:07:45 -07006936 * ixgbe_probe - Device Initialization Routine
6937 * @pdev: PCI device information struct
6938 * @ent: entry in ixgbe_pci_tbl
6939 *
6940 * Returns 0 on success, negative on failure
6941 *
6942 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
6943 * The OS initialization, configuring of the adapter private structure,
6944 * and a hardware reset occur.
6945 **/
6946static int __devinit ixgbe_probe(struct pci_dev *pdev,
Joe Perchese8e9f692010-09-07 21:34:53 +00006947 const struct pci_device_id *ent)
Auke Kok9a799d72007-09-15 14:07:45 -07006948{
6949 struct net_device *netdev;
6950 struct ixgbe_adapter *adapter = NULL;
6951 struct ixgbe_hw *hw;
6952 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
Auke Kok9a799d72007-09-15 14:07:45 -07006953 static int cards_found;
6954 int i, err, pci_using_dac;
John Fastabendc85a2612010-02-25 23:15:21 +00006955 unsigned int indices = num_possible_cpus();
Yi Zoueacd73f2009-05-13 13:11:06 +00006956#ifdef IXGBE_FCOE
6957 u16 device_caps;
6958#endif
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07006959 u32 part_num, eec;
Auke Kok9a799d72007-09-15 14:07:45 -07006960
Andy Gospodarekbded64a2010-07-21 06:40:31 +00006961 /* Catch broken hardware that put the wrong VF device ID in
6962 * the PCIe SR-IOV capability.
6963 */
6964 if (pdev->is_virtfn) {
6965 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
6966 pci_name(pdev), pdev->vendor, pdev->device);
6967 return -EINVAL;
6968 }
6969
gouji-new9ce77662009-05-06 10:44:45 +00006970 err = pci_enable_device_mem(pdev);
Auke Kok9a799d72007-09-15 14:07:45 -07006971 if (err)
6972 return err;
6973
Nick Nunley1b507732010-04-27 13:10:27 +00006974 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
6975 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
Auke Kok9a799d72007-09-15 14:07:45 -07006976 pci_using_dac = 1;
6977 } else {
Nick Nunley1b507732010-04-27 13:10:27 +00006978 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9a799d72007-09-15 14:07:45 -07006979 if (err) {
Nick Nunley1b507732010-04-27 13:10:27 +00006980 err = dma_set_coherent_mask(&pdev->dev,
6981 DMA_BIT_MASK(32));
Auke Kok9a799d72007-09-15 14:07:45 -07006982 if (err) {
Dan Carpenterb8bc0422010-07-27 00:05:56 +00006983 dev_err(&pdev->dev,
6984 "No usable DMA configuration, aborting\n");
Auke Kok9a799d72007-09-15 14:07:45 -07006985 goto err_dma;
6986 }
6987 }
6988 pci_using_dac = 0;
6989 }
6990
gouji-new9ce77662009-05-06 10:44:45 +00006991 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
Joe Perchese8e9f692010-09-07 21:34:53 +00006992 IORESOURCE_MEM), ixgbe_driver_name);
Auke Kok9a799d72007-09-15 14:07:45 -07006993 if (err) {
Dan Carpenterb8bc0422010-07-27 00:05:56 +00006994 dev_err(&pdev->dev,
6995 "pci_request_selected_regions failed 0x%x\n", err);
Auke Kok9a799d72007-09-15 14:07:45 -07006996 goto err_pci_reg;
6997 }
6998
Frans Pop19d5afd2009-10-02 10:04:12 -07006999 pci_enable_pcie_error_reporting(pdev);
Peter P Waskiewicz Jr6fabd712008-12-10 01:13:08 -08007000
Auke Kok9a799d72007-09-15 14:07:45 -07007001 pci_set_master(pdev);
Wendy Xiongfb3b27b2008-04-23 11:09:24 -07007002 pci_save_state(pdev);
Auke Kok9a799d72007-09-15 14:07:45 -07007003
John Fastabendc85a2612010-02-25 23:15:21 +00007004 if (ii->mac == ixgbe_mac_82598EB)
7005 indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
7006 else
7007 indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
7008
7009 indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES);
7010#ifdef IXGBE_FCOE
7011 indices += min_t(unsigned int, num_possible_cpus(),
7012 IXGBE_MAX_FCOE_INDICES);
7013#endif
John Fastabendc85a2612010-02-25 23:15:21 +00007014 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
Auke Kok9a799d72007-09-15 14:07:45 -07007015 if (!netdev) {
7016 err = -ENOMEM;
7017 goto err_alloc_etherdev;
7018 }
7019
Auke Kok9a799d72007-09-15 14:07:45 -07007020 SET_NETDEV_DEV(netdev, &pdev->dev);
7021
Auke Kok9a799d72007-09-15 14:07:45 -07007022 adapter = netdev_priv(netdev);
Alexander Duyckc60fbb02010-11-16 19:26:54 -08007023 pci_set_drvdata(pdev, adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07007024
7025 adapter->netdev = netdev;
7026 adapter->pdev = pdev;
7027 hw = &adapter->hw;
7028 hw->back = adapter;
7029 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
7030
Jeff Kirsher05857982008-09-11 19:57:00 -07007031 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
Joe Perchese8e9f692010-09-07 21:34:53 +00007032 pci_resource_len(pdev, 0));
Auke Kok9a799d72007-09-15 14:07:45 -07007033 if (!hw->hw_addr) {
7034 err = -EIO;
7035 goto err_ioremap;
7036 }
7037
7038 for (i = 1; i <= 5; i++) {
7039 if (pci_resource_len(pdev, i) == 0)
7040 continue;
7041 }
7042
Stephen Hemminger0edc3522008-11-19 22:24:29 -08007043 netdev->netdev_ops = &ixgbe_netdev_ops;
Auke Kok9a799d72007-09-15 14:07:45 -07007044 ixgbe_set_ethtool_ops(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07007045 netdev->watchdog_timeo = 5 * HZ;
Auke Kok9a799d72007-09-15 14:07:45 -07007046 strcpy(netdev->name, pci_name(pdev));
7047
Auke Kok9a799d72007-09-15 14:07:45 -07007048 adapter->bd_number = cards_found;
7049
Auke Kok9a799d72007-09-15 14:07:45 -07007050 /* Setup hw api */
7051 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08007052 hw->mac.type = ii->mac;
Auke Kok9a799d72007-09-15 14:07:45 -07007053
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07007054 /* EEPROM */
7055 memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
7056 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
7057 /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
7058 if (!(eec & (1 << 8)))
7059 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
7060
7061 /* PHY */
7062 memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
Donald Skidmorec4900be2008-11-20 21:11:42 -08007063 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
Ben Hutchings6b73e102009-04-29 08:08:58 +00007064 /* ixgbe_identify_phy_generic will set prtad and mmds properly */
7065 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
7066 hw->phy.mdio.mmds = 0;
7067 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
7068 hw->phy.mdio.dev = netdev;
7069 hw->phy.mdio.mdio_read = ixgbe_mdio_read;
7070 hw->phy.mdio.mdio_write = ixgbe_mdio_write;
Donald Skidmorec4900be2008-11-20 21:11:42 -08007071
7072 /* set up this timer and work struct before calling get_invariants
7073 * which might start the timer
7074 */
7075 init_timer(&adapter->sfp_timer);
Joe Perchesc061b182010-08-23 18:20:03 +00007076 adapter->sfp_timer.function = ixgbe_sfp_timer;
Donald Skidmorec4900be2008-11-20 21:11:42 -08007077 adapter->sfp_timer.data = (unsigned long) adapter;
7078
7079 INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07007080
PJ Waskiewicze8e26352009-02-27 15:45:05 +00007081 /* multispeed fiber has its own tasklet, called from GPI SDP1 context */
7082 INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task);
7083
7084 /* a new SFP+ module arrival, called from GPI SDP2 context */
7085 INIT_WORK(&adapter->sfp_config_module_task,
Joe Perchese8e9f692010-09-07 21:34:53 +00007086 ixgbe_sfp_config_module_task);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00007087
Don Skidmore8ca783a2009-05-26 20:40:47 -07007088 ii->get_invariants(hw);
Auke Kok9a799d72007-09-15 14:07:45 -07007089
7090 /* setup the private structure */
7091 err = ixgbe_sw_init(adapter);
7092 if (err)
7093 goto err_sw_init;
7094
Don Skidmoree86bff02010-02-11 04:14:08 +00007095 /* Make it possible the adapter to be woken up via WOL */
Don Skidmoreb93a2222010-11-16 19:27:17 -08007096 switch (adapter->hw.mac.type) {
7097 case ixgbe_mac_82599EB:
7098 case ixgbe_mac_X540:
Don Skidmoree86bff02010-02-11 04:14:08 +00007099 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
Don Skidmoreb93a2222010-11-16 19:27:17 -08007100 break;
7101 default:
7102 break;
7103 }
Don Skidmoree86bff02010-02-11 04:14:08 +00007104
Don Skidmorebf069c92009-05-07 10:39:54 +00007105 /*
7106 * If there is a fan on this device and it has failed log the
7107 * failure.
7108 */
7109 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
7110 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
7111 if (esdp & IXGBE_ESDP_SDP1)
Emil Tantilov396e7992010-07-01 20:05:12 +00007112 e_crit(probe, "Fan has stopped, replace the adapter\n");
Don Skidmorebf069c92009-05-07 10:39:54 +00007113 }
7114
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07007115 /* reset_hw fills in the perm_addr as well */
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07007116 hw->phy.reset_if_overtemp = true;
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07007117 err = hw->mac.ops.reset_hw(hw);
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07007118 hw->phy.reset_if_overtemp = false;
Don Skidmore8ca783a2009-05-26 20:40:47 -07007119 if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
7120 hw->mac.type == ixgbe_mac_82598EB) {
7121 /*
7122 * Start a kernel thread to watch for a module to arrive.
7123 * Only do this for 82598, since 82599 will generate
7124 * interrupts on module arrival.
7125 */
7126 set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
7127 mod_timer(&adapter->sfp_timer,
7128 round_jiffies(jiffies + (2 * HZ)));
7129 err = 0;
7130 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
Emil Tantilov849c4542010-06-03 16:53:41 +00007131 e_dev_err("failed to initialize because an unsupported SFP+ "
7132 "module type was detected.\n");
7133 e_dev_err("Reload the driver after installing a supported "
7134 "module.\n");
PJ Waskiewicz04f165e2009-04-09 22:27:57 +00007135 goto err_sw_init;
7136 } else if (err) {
Emil Tantilov849c4542010-06-03 16:53:41 +00007137 e_dev_err("HW Init failed: %d\n", err);
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07007138 goto err_sw_init;
7139 }
7140
Greg Rose1cdd1ec2010-01-09 02:26:46 +00007141 ixgbe_probe_vf(adapter, ii);
7142
Emil Tantilov396e7992010-07-01 20:05:12 +00007143 netdev->features = NETIF_F_SG |
Joe Perchese8e9f692010-09-07 21:34:53 +00007144 NETIF_F_IP_CSUM |
7145 NETIF_F_HW_VLAN_TX |
7146 NETIF_F_HW_VLAN_RX |
7147 NETIF_F_HW_VLAN_FILTER;
Auke Kok9a799d72007-09-15 14:07:45 -07007148
Jesse Brandeburge9990a92008-08-26 04:27:24 -07007149 netdev->features |= NETIF_F_IPV6_CSUM;
Auke Kok9a799d72007-09-15 14:07:45 -07007150 netdev->features |= NETIF_F_TSO;
Auke Kok9a799d72007-09-15 14:07:45 -07007151 netdev->features |= NETIF_F_TSO6;
Herbert Xu78b6f4c2009-01-18 21:49:45 -08007152 netdev->features |= NETIF_F_GRO;
Jeff Kirsherad31c402008-06-05 04:05:30 -07007153
Jesse Brandeburg45a5ead2009-04-27 22:36:35 +00007154 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
7155 netdev->features |= NETIF_F_SCTP_CSUM;
7156
Jeff Kirsherad31c402008-06-05 04:05:30 -07007157 netdev->vlan_features |= NETIF_F_TSO;
7158 netdev->vlan_features |= NETIF_F_TSO6;
Jesse Brandeburg22f32b7a52008-08-26 04:27:18 -07007159 netdev->vlan_features |= NETIF_F_IP_CSUM;
Alexander Duyckcd1da502009-08-25 04:47:50 +00007160 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
Jeff Kirsherad31c402008-06-05 04:05:30 -07007161 netdev->vlan_features |= NETIF_F_SG;
7162
Greg Rose1cdd1ec2010-01-09 02:26:46 +00007163 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
7164 adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
7165 IXGBE_FLAG_DCB_ENABLED);
Alexander Duyck2f90b862008-11-20 20:52:10 -08007166 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
7167 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
7168
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08007169#ifdef CONFIG_IXGBE_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -08007170 netdev->dcbnl_ops = &dcbnl_ops;
7171#endif
7172
Yi Zoueacd73f2009-05-13 13:11:06 +00007173#ifdef IXGBE_FCOE
Yi Zou0d551582009-07-22 14:07:12 +00007174 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
Yi Zoueacd73f2009-05-13 13:11:06 +00007175 if (hw->mac.ops.get_device_caps) {
7176 hw->mac.ops.get_device_caps(hw, &device_caps);
Yi Zou0d551582009-07-22 14:07:12 +00007177 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
7178 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
Yi Zoueacd73f2009-05-13 13:11:06 +00007179 }
7180 }
Yi Zou5e09d7f2010-07-19 13:59:52 +00007181 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
7182 netdev->vlan_features |= NETIF_F_FCOE_CRC;
7183 netdev->vlan_features |= NETIF_F_FSO;
7184 netdev->vlan_features |= NETIF_F_FCOE_MTU;
7185 }
Yi Zoueacd73f2009-05-13 13:11:06 +00007186#endif /* IXGBE_FCOE */
Yi Zou7b872a52010-09-22 17:57:58 +00007187 if (pci_using_dac) {
Auke Kok9a799d72007-09-15 14:07:45 -07007188 netdev->features |= NETIF_F_HIGHDMA;
Yi Zou7b872a52010-09-22 17:57:58 +00007189 netdev->vlan_features |= NETIF_F_HIGHDMA;
7190 }
Auke Kok9a799d72007-09-15 14:07:45 -07007191
Peter P Waskiewicz Jr0c19d6a2009-07-30 12:25:28 +00007192 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
Alexander Duyckf8212f92009-04-27 22:42:37 +00007193 netdev->features |= NETIF_F_LRO;
7194
Auke Kok9a799d72007-09-15 14:07:45 -07007195 /* make sure the EEPROM is good */
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07007196 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
Emil Tantilov849c4542010-06-03 16:53:41 +00007197 e_dev_err("The EEPROM Checksum Is Not Valid\n");
Auke Kok9a799d72007-09-15 14:07:45 -07007198 err = -EIO;
7199 goto err_eeprom;
7200 }
7201
7202 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
7203 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
7204
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07007205 if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
Emil Tantilov849c4542010-06-03 16:53:41 +00007206 e_dev_err("invalid MAC address\n");
Auke Kok9a799d72007-09-15 14:07:45 -07007207 err = -EIO;
7208 goto err_eeprom;
7209 }
7210
Don Skidmorec6ecf392010-12-03 03:31:51 +00007211 /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
7212 if (hw->mac.ops.disable_tx_laser &&
7213 ((hw->phy.multispeed_fiber) ||
7214 ((hw->phy.type == ixgbe_media_type_fiber) &&
7215 (hw->mac.type == ixgbe_mac_82599EB))))
Peter Waskiewicz61fac742010-04-27 00:38:15 +00007216 hw->mac.ops.disable_tx_laser(hw);
7217
Auke Kok9a799d72007-09-15 14:07:45 -07007218 init_timer(&adapter->watchdog_timer);
Joe Perchesc061b182010-08-23 18:20:03 +00007219 adapter->watchdog_timer.function = ixgbe_watchdog;
Auke Kok9a799d72007-09-15 14:07:45 -07007220 adapter->watchdog_timer.data = (unsigned long)adapter;
7221
7222 INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07007223 INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);
Auke Kok9a799d72007-09-15 14:07:45 -07007224
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08007225 err = ixgbe_init_interrupt_scheme(adapter);
7226 if (err)
7227 goto err_sw_init;
Auke Kok9a799d72007-09-15 14:07:45 -07007228
PJ Waskiewicze8e26352009-02-27 15:45:05 +00007229 switch (pdev->device) {
Don Skidmore0b077fe2010-12-03 03:32:13 +00007230 case IXGBE_DEV_ID_82599_SFP:
7231 /* Only this subdevice supports WOL */
7232 if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP)
7233 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
7234 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
7235 break;
Alexander Duyck50d6c682010-11-16 19:27:05 -08007236 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
7237 /* All except this subdevice support WOL */
Don Skidmore0b077fe2010-12-03 03:32:13 +00007238 if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
7239 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
7240 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
7241 break;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00007242 case IXGBE_DEV_ID_82599_KX4:
Waskiewicz Jr, Peter P495dce12009-04-23 11:15:18 +00007243 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
Joe Perchese8e9f692010-09-07 21:34:53 +00007244 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00007245 break;
7246 default:
7247 adapter->wol = 0;
7248 break;
7249 }
PJ Waskiewicze8e26352009-02-27 15:45:05 +00007250 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
7251
	/* pick up the PCI bus settings for reporting later */
	hw->mac.ops.get_bus_info(hw);

	/* print bus type/speed/width info */
	e_dev_info("(PCI Express:%s:%s) %pM\n",
		   (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0Gb/s" :
		    hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5Gb/s" :
		    "Unknown"),
		   (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
		    hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
		    hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
		    "Unknown"),
		   netdev->dev_addr);
	ixgbe_read_pba_num_generic(hw, &part_num);
	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %06x-%03x\n",
			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
			   (part_num >> 8), (part_num & 0xff));
	else
		e_dev_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
			   hw->mac.type, hw->phy.type,
			   (part_num >> 8), (part_num & 0xff));

	if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
		e_dev_warn("PCI-Express bandwidth available for this card is "
			   "not sufficient for optimal performance.\n");
		e_dev_warn("For optimal performance a x8 PCI-Express slot "
			   "is required.\n");
	}

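	/*
	 * Rough arithmetic behind the warning above: a Gen1 x4 link
	 * carries roughly 8 Gb/s of payload after 8b/10b encoding,
	 * already below the 10 Gb/s line rate of a single port, and a
	 * Gen2 x4 link (~16 Gb/s) still cannot feed two ports at line
	 * rate; hence the recommendation of an x8 slot.
	 */
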
	/* save off EEPROM version number */
	hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);

	/* reset the hardware with the new settings */
	err = hw->mac.ops.start_hw(hw);

	if (err == IXGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
		e_dev_warn("This device is a pre-production adapter/LOM. "
			   "Please be aware there may be issues associated "
			   "with your hardware. If you are experiencing "
			   "problems please contact your Intel or hardware "
			   "representative who provided you with this "
			   "hardware.\n");
	}
	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);

	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
		INIT_WORK(&adapter->check_overtemp_task,
			  ixgbe_check_overtemp_task);
#ifdef CONFIG_IXGBE_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		ixgbe_setup_dca(adapter);
	}
#endif
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
		for (i = 0; i < adapter->num_vfs; i++)
			ixgbe_vf_configuration(pdev, (i | 0x10000000));
	}

	/* add san mac addr to netdev */
	ixgbe_add_sanmac_netdev(netdev);

	e_dev_info("Intel(R) 10 Gigabit Network Connection\n");
	cards_found++;
	return 0;

err_register:
	ixgbe_release_hw_control(adapter);
	ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
err_eeprom:
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		ixgbe_disable_sriov(adapter);
	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	del_timer_sync(&adapter->sfp_timer);
	cancel_work_sync(&adapter->sfp_task);
	cancel_work_sync(&adapter->multispeed_fiber_task);
	cancel_work_sync(&adapter->sfp_config_module_task);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
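
/*
 * The error labels above follow the usual kernel unwind idiom: each
 * label releases only what was successfully acquired before the
 * corresponding goto, in reverse order of acquisition, so a failure at
 * any stage of probe frees exactly the resources taken so far.
 */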

/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	set_bit(__IXGBE_DOWN, &adapter->state);
	/* clear the module not found bit to make sure the worker won't
	 * reschedule
	 */
	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	del_timer_sync(&adapter->sfp_timer);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->sfp_task);
	cancel_work_sync(&adapter->multispeed_fiber_task);
	cancel_work_sync(&adapter->sfp_config_module_task);
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		cancel_work_sync(&adapter->fdir_reinit_task);
	flush_scheduled_work();

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
	}

#endif
#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		ixgbe_cleanup_fcoe(adapter);

#endif /* IXGBE_FCOE */

	/* remove the added san mac */
	ixgbe_del_sanmac_netdev(netdev);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		ixgbe_disable_sriov(adapter);

	ixgbe_clear_interrupt_scheme(adapter);

	ixgbe_release_hw_control(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	e_dev_info("complete\n");

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
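
/*
 * Note that ixgbe_remove() is essentially ixgbe_probe() in reverse:
 * timers and deferred work are stopped first so nothing can reschedule
 * against a half-torn-down adapter, then the device is detached from
 * DCA, FCoE and the netdev layer, and only then are hardware control,
 * the MMIO mapping and the PCI resources released.
 */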

/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		e_err(probe, "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_wake_from_d3(pdev, false);

		ixgbe_reset(adapter);
		/* Clear any stale Wake Up Status bits left from the error */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		e_dev_err("pci_cleanup_aer_uncorrect_error_status "
			  "failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (netif_running(netdev)) {
		if (ixgbe_up(adapter)) {
			e_info(probe, "ixgbe_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

static struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};
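
/*
 * Together these callbacks implement the standard PCI AER recovery
 * sequence: the core reports a fault through .error_detected, gives the
 * driver a chance to reinitialize after the slot reset via .slot_reset,
 * and restarts traffic through .resume once recovery succeeds.
 */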

static struct pci_driver ixgbe_driver = {
	.name = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe = ixgbe_probe,
	.remove = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
	.suspend = ixgbe_suspend,
	.resume = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
};

/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
	pr_info("%s\n", ixgbe_copyright);

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&ixgbe_driver);
	return ret;
}

module_init(ixgbe_init_module);
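
/*
 * Typical usage, assuming the driver was built as a module named
 * ixgbe.ko:
 *
 *	modprobe ixgbe
 *	dmesg | grep ixgbe	# driver and copyright banners from above
 */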

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
			    void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif /* CONFIG_IXGBE_DCA */
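
/*
 * ixgbe_notify_dca() fans each DCA provider add/remove event out to
 * every device bound to this driver via driver_for_each_device(), so a
 * single notification updates DCA state on all ports at once.
 */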

/**
 * ixgbe_get_hw_dev - return the net_device associated with an ixgbe_hw
 * @hw: pointer to hardware structure
 *
 * Used by the hardware layer to print debugging information.
 **/
struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	return adapter->netdev;
}

module_exit(ixgbe_exit_module);

/* ixgbe_main.c */