/**************************************************************************
 *
 * Copyright (C) 2000-2008 Alacritech, Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of Alacritech, Inc.
 *
 * Parts developed by LinSysSoft Sahara team
 *
 **************************************************************************/

/*
 * FILENAME: sxg.c
 *
 * The SXG driver for Alacritech's 10Gbe products.
 *
 * NOTE: This is the standard, non-accelerated version of Alacritech's
 *       IS-NIC driver.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>

#define SLIC_GET_STATS_ENABLED		0
#define LINUX_FREES_ADAPTER_RESOURCES	1
#define SXG_OFFLOAD_IP_CHECKSUM		0
#define SXG_POWER_MANAGEMENT_ENABLED	0
#define VPCI				0
#define ATK_DEBUG			1

#include "sxg_os.h"
#include "sxghw.h"
#include "sxghif.h"
#include "sxg.h"
#include "sxgdbg.h"

#include "sxgphycode.h"
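
/*
 * The sahara*download*.c files included below carry the generated
 * microcode image arrays (section counts, sizes, start addresses and
 * instruction words) that sxg_download_microcode() writes to the card.
 */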
#define SXG_UCODE_DBG 0		/* Turn on for debugging */
#if SXG_UCODE_DBG
#include "saharadbgdownload.c"
#include "saharadbgdownloadB.c"
#else
#include "saharadownload.c"
#include "saharadownloadB.c"
#endif

static int sxg_allocate_buffer_memory(struct adapter_t *adapter, u32 Size,
                                      enum sxg_buffer_type BufferType);
static int sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
                                          void *RcvBlock,
                                          dma_addr_t PhysicalAddress,
                                          u32 Length);
static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
                                             struct sxg_scatter_gather *SxgSgl,
                                             dma_addr_t PhysicalAddress,
                                             u32 Length);

static void sxg_mcast_init_crc32(void);
static int sxg_entry_open(struct net_device *dev);
static int sxg_second_open(struct net_device *dev);
static int sxg_entry_halt(struct net_device *dev);
static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev);
static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb);
static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
                        struct sxg_scatter_gather *SxgSgl);

static void sxg_handle_interrupt(struct adapter_t *adapter, int *work_done,
                                 int budget);
static void sxg_interrupt(struct adapter_t *adapter);
static int sxg_poll(struct napi_struct *napi, int budget);
static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId);
static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId,
                                   int *sxg_napi_continue, int *work_done,
                                   int budget);
static void sxg_complete_slow_send(struct adapter_t *adapter);
static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
                                        struct sxg_event *Event);
static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus);
static bool sxg_mac_filter(struct adapter_t *adapter,
                           struct ether_header *EtherHdr, ushort length);
static struct net_device_stats *sxg_get_stats(struct net_device *dev);
void sxg_free_resources(struct adapter_t *adapter);
void sxg_free_rcvblocks(struct adapter_t *adapter);
void sxg_free_sgl_buffers(struct adapter_t *adapter);
void sxg_unmap_resources(struct adapter_t *adapter);
void sxg_free_mcast_addrs(struct adapter_t *adapter);
void sxg_collect_statistics(struct adapter_t *adapter);

#define XXXTODO 0

#if XXXTODO
static int sxg_mac_set_address(struct net_device *dev, void *ptr);
#endif
static void sxg_mcast_set_list(struct net_device *dev);

static int sxg_adapter_set_hwaddr(struct adapter_t *adapter);

static int sxg_initialize_adapter(struct adapter_t *adapter);
static void sxg_stock_rcv_buffers(struct adapter_t *adapter);
static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
                                           unsigned char Index);
int sxg_change_mtu(struct net_device *netdev, int new_mtu);
static int sxg_initialize_link(struct adapter_t *adapter);
static int sxg_phy_init(struct adapter_t *adapter);
static void sxg_link_event(struct adapter_t *adapter);
static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter);
static void sxg_link_state(struct adapter_t *adapter,
                           enum SXG_LINK_STATE LinkState);
static int sxg_write_mdio_reg(struct adapter_t *adapter,
                              u32 DevAddr, u32 RegAddr, u32 Value);
static int sxg_read_mdio_reg(struct adapter_t *adapter,
                             u32 DevAddr, u32 RegAddr, u32 *pValue);
static void sxg_set_mcast_addr(struct adapter_t *adapter);

static unsigned int sxg_first_init = 1;
static char *sxg_banner =
        "Alacritech SLIC Technology(tm) Server and Storage "
        "10Gbe Accelerator (Non-Accelerated)\n";

static int sxg_debug = 1;
static int debug = -1;
static struct net_device *head_netdevice = NULL;

static struct sxgbase_driver sxg_global = {
        .dynamic_intagg = 1,
};
static int intagg_delay = 100;
static u32 dynamic_intagg = 0;

char sxg_driver_name[] = "sxg_nic";
#define DRV_AUTHOR	"Alacritech, Inc. Engineering"
#define DRV_DESCRIPTION	\
        "Alacritech SLIC Technology(tm) Non-Accelerated 10Gbe Driver"
#define DRV_COPYRIGHT	\
        "Copyright 2000-2008 Alacritech, Inc.  All rights reserved."

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");

module_param(dynamic_intagg, int, 0);
MODULE_PARM_DESC(dynamic_intagg, "Dynamic Interrupt Aggregation Setting");
module_param(intagg_delay, int, 0);
MODULE_PARM_DESC(intagg_delay, "uSec Interrupt Aggregation Delay");

static struct pci_device_id sxg_pci_tbl[] __devinitdata = {
        {PCI_DEVICE(SXG_VENDOR_ID, SXG_DEVICE_ID)},
        {0,}
};

MODULE_DEVICE_TABLE(pci, sxg_pci_tbl);

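/*
 * sxg_reg32_write - Write a 32-bit value to a memory-mapped card register
 * and, if requested, issue a memory barrier so the write is posted before
 * the caller proceeds.
 */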
static inline void sxg_reg32_write(void __iomem *reg, u32 value, bool flush)
{
        writel(value, reg);
        if (flush)
                mb();
}

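/*
 * sxg_reg64_write - Write a 64-bit value as two 32-bit halves: the upper
 * half goes to the per-CPU Upper register and the lower half to the target
 * register.  Bit64RegLock keeps the two halves from interleaving with
 * another 64-bit write.
 */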
static inline void sxg_reg64_write(struct adapter_t *adapter, void __iomem *reg,
                                   u64 value, u32 cpu)
{
        u32 value_high = (u32) (value >> 32);
        u32 value_low = (u32) (value & 0x00000000FFFFFFFF);
        unsigned long flags;

        spin_lock_irqsave(&adapter->Bit64RegLock, flags);
        writel(value_high, (void __iomem *)(&adapter->UcodeRegs[cpu].Upper));
        writel(value_low, reg);
        spin_unlock_irqrestore(&adapter->Bit64RegLock, flags);
}

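/* One-time driver initialization: set up the global driver lock. */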
static void sxg_init_driver(void)
{
        if (sxg_first_init) {
                DBG_ERROR("sxg: %s sxg_first_init set jiffies[%lx]\n",
                          __func__, jiffies);
                sxg_first_init = 0;
                spin_lock_init(&sxg_global.driver_lock);
        }
}

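/* Debug helper: dump the adapter's current and stored MAC addresses. */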
static void sxg_dbg_macaddrs(struct adapter_t *adapter)
{
        DBG_ERROR("  (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
                  adapter->netdev->name, adapter->currmacaddr[0],
                  adapter->currmacaddr[1], adapter->currmacaddr[2],
                  adapter->currmacaddr[3], adapter->currmacaddr[4],
                  adapter->currmacaddr[5]);
        DBG_ERROR("  (%s) mac  %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
                  adapter->netdev->name, adapter->macaddr[0],
                  adapter->macaddr[1], adapter->macaddr[2],
                  adapter->macaddr[3], adapter->macaddr[4],
                  adapter->macaddr[5]);
        return;
}

/* SXG Globals */
static struct sxg_driver SxgDriver;

#ifdef ATKDBG
static struct sxg_trace_buffer LSxgTraceBuffer;
#endif /* ATKDBG */
static struct sxg_trace_buffer *SxgTraceBuffer = NULL;

/*
 * sxg_download_microcode
 *
 * Download Microcode to Sahara adapter
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	UcodeSel	- microcode file selection
 *
 * Return
 *	bool (TRUE on success)
 */
static bool sxg_download_microcode(struct adapter_t *adapter,
                                   enum SXG_UCODE_SEL UcodeSel)
{
        struct sxg_hw_regs *HwRegs = adapter->HwRegs;
        u32 Section;
        u32 ThisSectionSize;
        u32 *Instruction = NULL;
        u32 BaseAddress, AddressOffset, Address;
        /* u32 Failure; */
        u32 ValueRead;
        u32 i;
        u32 numSections = 0;
        u32 sectionSize[16];
        u32 sectionStart[16];

        SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DnldUcod",
                  adapter, 0, 0, 0);
        DBG_ERROR("sxg: %s ENTER\n", __func__);

        switch (UcodeSel) {
        case SXG_UCODE_SAHARA:	/* Sahara operational ucode */
                numSections = SNumSections;
                for (i = 0; i < numSections; i++) {
                        sectionSize[i] = SSectionSize[i];
                        sectionStart[i] = SSectionStart[i];
                }
                break;
        default:
                printk(KERN_ERR KBUILD_MODNAME
                       ": Woah, big error with the microcode!\n");
                break;
        }

        DBG_ERROR("sxg: RESET THE CARD\n");
        /* First, reset the card */
        WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);

        /*
         * Download each section of the microcode as specified in
         * its download file.  The *download.c file is generated using
         * the saharaobjtoc facility which converts the metastep .obj
         * file to a .c file which contains a two-dimensional array.
         */
        for (Section = 0; Section < numSections; Section++) {
                DBG_ERROR("sxg: SECTION # %d\n", Section);
                switch (UcodeSel) {
                case SXG_UCODE_SAHARA:
                        Instruction = (u32 *) &SaharaUCode[Section][0];
                        break;
                default:
                        ASSERT(0);
                        break;
                }
                BaseAddress = sectionStart[Section];
                /* Size in instructions */
                ThisSectionSize = sectionSize[Section] / 12;
                for (AddressOffset = 0; AddressOffset < ThisSectionSize;
                     AddressOffset++) {
                        Address = BaseAddress + AddressOffset;
                        ASSERT((Address & ~MICROCODE_ADDRESS_MASK) == 0);
                        /* Write instruction bits 31 - 0 */
                        WRITE_REG(HwRegs->UcodeDataLow, *Instruction, FLUSH);
                        /* Write instruction bits 63-32 */
                        WRITE_REG(HwRegs->UcodeDataMiddle, *(Instruction + 1),
                                  FLUSH);
                        /* Write instruction bits 95-64 */
                        WRITE_REG(HwRegs->UcodeDataHigh, *(Instruction + 2),
                                  FLUSH);
                        /* Write instruction address with the WRITE bit set */
                        WRITE_REG(HwRegs->UcodeAddr,
                                  (Address | MICROCODE_ADDRESS_WRITE), FLUSH);
                        /*
                         * Sahara bug in the ucode download logic - the write
                         * to DataLow for the next instruction could get
                         * corrupted.  To avoid this, write to DataLow again
                         * for this instruction (which may get corrupted, but
                         * it doesn't matter), then increment the address and
                         * write the data for the next instruction to DataLow.
                         * That write should succeed.
                         */
                        WRITE_REG(HwRegs->UcodeDataLow, *Instruction, TRUE);
                        /* Advance 3 u32S to start of next instruction */
                        Instruction += 3;
                }
        }
        /*
         * Now repeat the entire operation reading the instruction back and
         * checking for parity errors
         */
        for (Section = 0; Section < numSections; Section++) {
                DBG_ERROR("sxg: check SECTION # %d\n", Section);
                switch (UcodeSel) {
                case SXG_UCODE_SAHARA:
                        Instruction = (u32 *) &SaharaUCode[Section][0];
                        break;
                default:
                        ASSERT(0);
                        break;
                }
                BaseAddress = sectionStart[Section];
                /* Size in instructions */
                ThisSectionSize = sectionSize[Section] / 12;
                for (AddressOffset = 0; AddressOffset < ThisSectionSize;
                     AddressOffset++) {
                        Address = BaseAddress + AddressOffset;
                        /* Write the address with the READ bit set */
                        WRITE_REG(HwRegs->UcodeAddr,
                                  (Address | MICROCODE_ADDRESS_READ), FLUSH);
                        /* Read it back and check parity bit. */
                        READ_REG(HwRegs->UcodeAddr, ValueRead);
                        if (ValueRead & MICROCODE_ADDRESS_PARITY) {
                                DBG_ERROR("sxg: %s PARITY ERROR\n",
                                          __func__);

                                return FALSE;	/* Parity error */
                        }
                        ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address);
                        /* Read the instruction back and compare */
                        READ_REG(HwRegs->UcodeDataLow, ValueRead);
                        if (ValueRead != *Instruction) {
                                DBG_ERROR("sxg: %s MISCOMPARE LOW\n",
                                          __func__);
                                return FALSE;	/* Miscompare */
                        }
                        READ_REG(HwRegs->UcodeDataMiddle, ValueRead);
                        if (ValueRead != *(Instruction + 1)) {
                                DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n",
                                          __func__);
                                return FALSE;	/* Miscompare */
                        }
                        READ_REG(HwRegs->UcodeDataHigh, ValueRead);
                        if (ValueRead != *(Instruction + 2)) {
                                DBG_ERROR("sxg: %s MISCOMPARE HIGH\n",
                                          __func__);
                                return FALSE;	/* Miscompare */
                        }
                        /* Advance 3 u32S to start of next instruction */
                        Instruction += 3;
                }
        }

        /* Everything OK, Go. */
        WRITE_REG(HwRegs->UcodeAddr, MICROCODE_ADDRESS_GO, FLUSH);

        /*
         * Poll the CardUp register to wait for microcode to initialize.
         * Give up after 10,000 attempts (500ms).
         */
        for (i = 0; i < 10000; i++) {
                udelay(50);
                READ_REG(adapter->UcodeRegs[0].CardUp, ValueRead);
                if (ValueRead == 0xCAFE) {
                        DBG_ERROR("sxg: %s BOO YA 0xCAFE\n", __func__);
                        break;
                }
        }
        if (i == 10000) {
                DBG_ERROR("sxg: %s TIMEOUT\n", __func__);

                return FALSE;	/* Timeout */
        }
        /*
         * Now write the LoadSync register.  This is used to
         * synchronize with the card so it can scribble on the memory
         * that contained 0xCAFE from the "CardUp" step above
         */
        if (UcodeSel == SXG_UCODE_SAHARA) {
                WRITE_REG(adapter->UcodeRegs[0].LoadSync, 0, FLUSH);
        }

        SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDnldUcd",
                  adapter, 0, 0, 0);
        DBG_ERROR("sxg: %s EXIT\n", __func__);

        return (TRUE);
}

/*
 * sxg_allocate_resources - Allocate memory and locks
 *
 * Arguments -
 *	adapter	- A pointer to our adapter structure
 *
 * Return - int
 */
static int sxg_allocate_resources(struct adapter_t *adapter)
{
        int status;
        u32 i;
        u32 RssIds, IsrCount;
        /* struct sxg_xmt_ring *XmtRing; */
        /* struct sxg_rcv_ring *RcvRing; */

        DBG_ERROR("%s ENTER\n", __func__);

        SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocRes",
                  adapter, 0, 0, 0);

        /* Windows tells us how many CPUs it plans to use for RSS */
        RssIds = SXG_RSS_CPU_COUNT(adapter);
        IsrCount = adapter->MsiEnabled ? RssIds : 1;

        DBG_ERROR("%s Setup the spinlocks\n", __func__);

        /* Allocate spinlocks and initialize listheads first. */
        spin_lock_init(&adapter->RcvQLock);
        spin_lock_init(&adapter->SglQLock);
        spin_lock_init(&adapter->XmtZeroLock);
        spin_lock_init(&adapter->Bit64RegLock);
        spin_lock_init(&adapter->AdapterLock);
        atomic_set(&adapter->pending_allocations, 0);

        DBG_ERROR("%s Setup the lists\n", __func__);

        InitializeListHead(&adapter->FreeRcvBuffers);
        InitializeListHead(&adapter->FreeRcvBlocks);
        InitializeListHead(&adapter->AllRcvBlocks);
        InitializeListHead(&adapter->FreeSglBuffers);
        InitializeListHead(&adapter->AllSglBuffers);

        /*
         * Mark these basic allocations done.  This flag essentially
         * tells the SxgFreeResources routine that it can grab spinlocks
         * and reference listheads.
         */
        adapter->BasicAllocations = TRUE;
        /*
         * Main allocation loop.  Start with the maximum supported by
         * the microcode and back off if memory allocation
         * fails.  If we hit a minimum, fail.
         */

        for (;;) {
                DBG_ERROR("%s Allocate XmtRings size[%x]\n", __func__,
                          (unsigned int)(sizeof(struct sxg_xmt_ring) * 1));

                /*
                 * Start with big items first - receive and transmit rings.
                 * At the moment I'm going to keep the ring size fixed and
                 * adjust the TCBs if we fail.  Later we might
                 * consider reducing the ring size as well..
                 */
                adapter->XmtRings =
                    pci_alloc_consistent(adapter->pcidev,
                                         sizeof(struct sxg_xmt_ring) * 1,
                                         &adapter->PXmtRings);
                DBG_ERROR("%s XmtRings[%p]\n", __func__, adapter->XmtRings);

                if (!adapter->XmtRings) {
                        goto per_tcb_allocation_failed;
                }
                memset(adapter->XmtRings, 0, sizeof(struct sxg_xmt_ring) * 1);

                DBG_ERROR("%s Allocate RcvRings size[%x]\n", __func__,
                          (unsigned int)(sizeof(struct sxg_rcv_ring) * 1));
                adapter->RcvRings =
                    pci_alloc_consistent(adapter->pcidev,
                                         sizeof(struct sxg_rcv_ring) * 1,
                                         &adapter->PRcvRings);
                DBG_ERROR("%s RcvRings[%p]\n", __func__, adapter->RcvRings);
                if (!adapter->RcvRings) {
                        goto per_tcb_allocation_failed;
                }
                memset(adapter->RcvRings, 0, sizeof(struct sxg_rcv_ring) * 1);
                adapter->ucode_stats = kzalloc(sizeof(struct sxg_ucode_stats),
                                               GFP_ATOMIC);
                adapter->pucode_stats = pci_map_single(adapter->pcidev,
                                        adapter->ucode_stats,
                                        sizeof(struct sxg_ucode_stats),
                                        PCI_DMA_FROMDEVICE);
                /* memset(adapter->ucode_stats, 0,
                   sizeof(struct sxg_ucode_stats)); */
                break;

          per_tcb_allocation_failed:
                /* an allocation failed. Free any successful allocations. */
                if (adapter->XmtRings) {
                        pci_free_consistent(adapter->pcidev,
                                            sizeof(struct sxg_xmt_ring) * 1,
                                            adapter->XmtRings,
                                            adapter->PXmtRings);
                        adapter->XmtRings = NULL;
                }
                if (adapter->RcvRings) {
                        pci_free_consistent(adapter->pcidev,
                                            sizeof(struct sxg_rcv_ring) * 1,
                                            adapter->RcvRings,
                                            adapter->PRcvRings);
                        adapter->RcvRings = NULL;
                }
                /* Loop around and try again.... */
                if (adapter->ucode_stats) {
                        pci_unmap_single(adapter->pcidev,
                                         adapter->pucode_stats,
                                         sizeof(struct sxg_ucode_stats),
                                         PCI_DMA_FROMDEVICE);
                        adapter->ucode_stats = NULL;
                }

        }

        DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __func__);
        /* Initialize rcv zero and xmt zero rings */
        SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
        SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);

        /* Sanity check receive data structure format */
        /* ASSERT((adapter->ReceiveBufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
           (adapter->ReceiveBufferSize == SXG_RCV_JUMBO_BUFFER_SIZE)); */
        ASSERT(sizeof(struct sxg_rcv_descriptor_block) ==
               SXG_RCV_DESCRIPTOR_BLOCK_SIZE);

        /*
         * Allocate receive data buffers.  We allocate a block of buffers and
         * a corresponding descriptor block at once.  See sxghw.h:SXG_RCV_BLOCK
         */
        for (i = 0; i < SXG_INITIAL_RCV_DATA_BUFFERS;
             i += SXG_RCV_DESCRIPTORS_PER_BLOCK) {
                status = sxg_allocate_buffer_memory(adapter,
                                SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
                                SXG_BUFFER_TYPE_RCV);
                if (status != STATUS_SUCCESS)
                        return status;
        }
        /*
         * NBL resource allocation can fail in the 'AllocateComplete' routine,
         * which doesn't return status.  Make sure we got the number of buffers
         * we requested
         */
        if (adapter->FreeRcvBufferCount < SXG_INITIAL_RCV_DATA_BUFFERS) {
                SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
                          adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES,
                          0);
                return (STATUS_RESOURCES);
        }

        DBG_ERROR("%s Allocate EventRings size[%x]\n", __func__,
                  (unsigned int)(sizeof(struct sxg_event_ring) * RssIds));

        /* Allocate event queues. */
        adapter->EventRings =
            pci_alloc_consistent(adapter->pcidev,
                                 sizeof(struct sxg_event_ring) * RssIds,
                                 &adapter->PEventRings);

        if (!adapter->EventRings) {
                /* Caller will call SxgFreeAdapter to clean up above
                 * allocations */
                SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8",
                          adapter, SXG_MAX_ENTRIES, 0, 0);
                status = STATUS_RESOURCES;
                goto per_tcb_allocation_failed;
        }
        memset(adapter->EventRings, 0, sizeof(struct sxg_event_ring) * RssIds);

        DBG_ERROR("%s Allocate ISR size[%x]\n", __func__, IsrCount);
        /* Allocate ISR */
        adapter->Isr = pci_alloc_consistent(adapter->pcidev,
                                            sizeof(u32) * IsrCount,
                                            &adapter->PIsr);
        if (!adapter->Isr) {
                /* Caller will call SxgFreeAdapter to clean up above
                 * allocations */
                SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9",
                          adapter, SXG_MAX_ENTRIES, 0, 0);
                status = STATUS_RESOURCES;
                goto per_tcb_allocation_failed;
        }
        memset(adapter->Isr, 0, sizeof(u32) * IsrCount);

        DBG_ERROR("%s Allocate shared XMT ring zero index location size[%x]\n",
                  __func__, (unsigned int)sizeof(u32));

        /* Allocate shared XMT ring zero index location */
        adapter->XmtRingZeroIndex = pci_alloc_consistent(adapter->pcidev,
                                                 sizeof(u32),
                                                 &adapter->PXmtRingZeroIndex);
        if (!adapter->XmtRingZeroIndex) {
                SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF10",
                          adapter, SXG_MAX_ENTRIES, 0, 0);
                status = STATUS_RESOURCES;
                goto per_tcb_allocation_failed;
        }
        memset(adapter->XmtRingZeroIndex, 0, sizeof(u32));

        SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlcResS",
                  adapter, SXG_MAX_ENTRIES, 0, 0);

        return status;
}

/*
 * sxg_config_pci -
 *
 * Set up PCI Configuration space
 *
 * Arguments -
 *	pcidev	- A pointer to the PCI device structure
 */
static void sxg_config_pci(struct pci_dev *pcidev)
{
        u16 pci_command;
        u16 new_command;

        pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
        DBG_ERROR("sxg: %s  PCI command[%4.4x]\n", __func__, pci_command);
        /* Set the command register */
        new_command = pci_command | (
                                     /* Memory Space Enable */
                                     PCI_COMMAND_MEMORY |
                                     /* Bus master enable */
                                     PCI_COMMAND_MASTER |
                                     /* Memory write and invalidate */
                                     PCI_COMMAND_INVALIDATE |
                                     /* Parity error response */
                                     PCI_COMMAND_PARITY |
                                     /* System ERR */
                                     PCI_COMMAND_SERR |
                                     /* Fast back-to-back */
                                     PCI_COMMAND_FAST_BACK);
        if (pci_command != new_command) {
                DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n",
                          __func__, pci_command, new_command);
                pci_write_config_word(pcidev, PCI_COMMAND, new_command);
        }
}

/*
 * sxg_read_config
 * @adapter : Pointer to the adapter structure for the card
 * This function will read the configuration data from EEPROM/FLASH
 */
static inline int sxg_read_config(struct adapter_t *adapter)
{
        /* struct sxg_config data; */
        struct sw_cfg_data *data;
        dma_addr_t p_addr;
        unsigned long status;
        unsigned long i;

        data = pci_alloc_consistent(adapter->pcidev,
                                    sizeof(struct sw_cfg_data), &p_addr);
        if (!data) {
                /*
                 * We can't get even this much memory.  Complain
                 * and get out of here.
                 */
                printk(KERN_ERR "%s : Could not allocate memory for reading "
                       "EEPROM\n", __func__);
                return -ENOMEM;
        }

        WRITE_REG(adapter->UcodeRegs[0].ConfigStat, SXG_CFG_TIMEOUT, TRUE);

        WRITE_REG64(adapter, adapter->UcodeRegs[0].Config, p_addr, 0);
        for (i = 0; i < 1000; i++) {
                READ_REG(adapter->UcodeRegs[0].ConfigStat, status);
                if (status != SXG_CFG_TIMEOUT)
                        break;
                mdelay(1);	/* Do we really need this */
        }

        switch (status) {
        /* Config read from EEPROM succeeded */
        case SXG_CFG_LOAD_EEPROM:
        /* Config read from Flash succeeded */
        case SXG_CFG_LOAD_FLASH:
                /*
                 * Copy the MAC address to the adapter structure.
                 * TODO: We are not doing the remaining part: FRU, etc.
                 */
                memcpy(adapter->macaddr, data->MacAddr[0].MacAddr,
                       sizeof(struct sxg_config_mac));
                break;
        case SXG_CFG_TIMEOUT:
        case SXG_CFG_LOAD_INVALID:
        case SXG_CFG_LOAD_ERROR:
        default:	/* Fix default handler later */
                printk(KERN_WARNING "%s : We could not read the config "
                       "word. Status = %ld\n", __func__, status);
                break;
        }
        pci_free_consistent(adapter->pcidev, sizeof(struct sw_cfg_data), data,
                            p_addr);
        if (adapter->netdev) {
                memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
                memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6);
        }
        sxg_dbg_macaddrs(adapter);

        return status;
}

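/*
 * sxg_entry_probe - PCI probe entry point
 *
 * Enables the PCI device, maps BAR 0 (hardware registers) and BAR 2
 * (microcode registers), allocates adapter resources, downloads the
 * Sahara microcode and registers the net_device.
 */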
static int sxg_entry_probe(struct pci_dev *pcidev,
                           const struct pci_device_id *pci_tbl_entry)
{
        static int did_version = 0;
        int err;
        struct net_device *netdev;
        struct adapter_t *adapter;
        void __iomem *memmapped_ioaddr;
        u32 status = 0;
        ulong mmio_start = 0;
        ulong mmio_len = 0;

        DBG_ERROR("sxg: %s 2.6 VERSION ENTER jiffies[%lx] cpu %d\n",
                  __func__, jiffies, smp_processor_id());

        /* Initialize trace buffer */
#ifdef ATKDBG
        SxgTraceBuffer = &LSxgTraceBuffer;
        SXG_TRACE_INIT(SxgTraceBuffer, TRACE_NOISY);
#endif

        sxg_global.dynamic_intagg = dynamic_intagg;

        err = pci_enable_device(pcidev);

        DBG_ERROR("Call pci_enable_device(%p)  status[%x]\n", pcidev, err);
        if (err) {
                return err;
        }

        if (sxg_debug > 0 && did_version++ == 0) {
                printk(KERN_INFO "%s\n", sxg_banner);
                printk(KERN_INFO "%s\n", SXG_DRV_VERSION);
        }

        if (!(err = pci_set_dma_mask(pcidev, DMA_64BIT_MASK))) {
                DBG_ERROR("pci_set_dma_mask(DMA_64BIT_MASK) successful\n");
        } else {
                if ((err = pci_set_dma_mask(pcidev, DMA_32BIT_MASK))) {
                        DBG_ERROR
                            ("No usable DMA configuration, aborting  err[%x]\n",
                             err);
                        return err;
                }
                DBG_ERROR("pci_set_dma_mask(DMA_32BIT_MASK) successful\n");
        }

        DBG_ERROR("Call pci_request_regions\n");

        err = pci_request_regions(pcidev, sxg_driver_name);
        if (err) {
                DBG_ERROR("pci_request_regions FAILED err[%x]\n", err);
                return err;
        }

        DBG_ERROR("call pci_set_master\n");
        pci_set_master(pcidev);

        DBG_ERROR("call alloc_etherdev\n");
        netdev = alloc_etherdev(sizeof(struct adapter_t));
        if (!netdev) {
                err = -ENOMEM;
                goto err_out_exit_sxg_probe;
        }
        DBG_ERROR("alloc_etherdev for slic netdev[%p]\n", netdev);

        SET_NETDEV_DEV(netdev, &pcidev->dev);

        pci_set_drvdata(pcidev, netdev);
        adapter = netdev_priv(netdev);
        adapter->netdev = netdev;
        adapter->pcidev = pcidev;

        mmio_start = pci_resource_start(pcidev, 0);
        mmio_len = pci_resource_len(pcidev, 0);

        DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
                  mmio_start, mmio_len);

        memmapped_ioaddr = ioremap(mmio_start, mmio_len);
        DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
                  memmapped_ioaddr);
        if (!memmapped_ioaddr) {
                DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
                          __func__, mmio_len, mmio_start);
                goto err_out_free_mmio_region_0;
        }

        DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, "
                  "start[%lx] len[%lx], IRQ %d.\n", __func__,
                  memmapped_ioaddr, mmio_start, mmio_len, pcidev->irq);

        adapter->HwRegs = (void *)memmapped_ioaddr;
        adapter->base_addr = memmapped_ioaddr;

        mmio_start = pci_resource_start(pcidev, 2);
        mmio_len = pci_resource_len(pcidev, 2);

        DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
                  mmio_start, mmio_len);

        memmapped_ioaddr = ioremap(mmio_start, mmio_len);
        DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
                  memmapped_ioaddr);
        if (!memmapped_ioaddr) {
                DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
                          __func__, mmio_len, mmio_start);
                goto err_out_free_mmio_region_2;
        }

        DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, "
                  "start[%lx] len[%lx], IRQ %d.\n", __func__,
                  memmapped_ioaddr, mmio_start, mmio_len, pcidev->irq);

        adapter->UcodeRegs = (void *)memmapped_ioaddr;

        adapter->State = SXG_STATE_INITIALIZING;
        /*
         * Maintain a list of all adapters anchored by
         * the global SxgDriver structure.
         */
        adapter->Next = SxgDriver.Adapters;
        SxgDriver.Adapters = adapter;
        adapter->AdapterID = ++SxgDriver.AdapterID;

        /* Initialize CRC table used to determine multicast hash */
        sxg_mcast_init_crc32();

        adapter->JumboEnabled = FALSE;
        adapter->RssEnabled = FALSE;
        if (adapter->JumboEnabled) {
                adapter->FrameSize = JUMBOMAXFRAME;
                adapter->ReceiveBufferSize = SXG_RCV_JUMBO_BUFFER_SIZE;
        } else {
                adapter->FrameSize = ETHERMAXFRAME;
                adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
        }

        /*
         * status = SXG_READ_EEPROM(adapter);
         * if (!status) {
         *	goto sxg_init_bad;
         * }
         */

        DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __func__);
        sxg_config_pci(pcidev);
        DBG_ERROR("sxg: %s EXIT sxg_config_pci\n", __func__);

        DBG_ERROR("sxg: %s ENTER sxg_init_driver\n", __func__);
        sxg_init_driver();
        DBG_ERROR("sxg: %s EXIT sxg_init_driver\n", __func__);

        adapter->vendid = pci_tbl_entry->vendor;
        adapter->devid = pci_tbl_entry->device;
        adapter->subsysid = pci_tbl_entry->subdevice;
        adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F);
        adapter->functionnumber = (pcidev->devfn & 0x7);
        adapter->memorylength = pci_resource_len(pcidev, 0);
        adapter->irq = pcidev->irq;
        adapter->next_netdevice = head_netdevice;
        head_netdevice = netdev;
        adapter->port = 0;	/* adapter->functionnumber; */

        /* Allocate memory and other resources */
        DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __func__);
        status = sxg_allocate_resources(adapter);
        DBG_ERROR("sxg: %s EXIT sxg_allocate_resources status %x\n",
                  __func__, status);
        if (status != STATUS_SUCCESS) {
                goto err_out_unmap;
        }

        DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __func__);
        if (sxg_download_microcode(adapter, SXG_UCODE_SAHARA)) {
                DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
                          __func__);
                sxg_read_config(adapter);
                status = sxg_adapter_set_hwaddr(adapter);
        } else {
                adapter->state = ADAPT_FAIL;
                adapter->linkstate = LINK_DOWN;
                DBG_ERROR("sxg_download_microcode FAILED status[%x]\n", status);
        }

        netdev->base_addr = (unsigned long)adapter->base_addr;
        netdev->irq = adapter->irq;
        netdev->open = sxg_entry_open;
        netdev->stop = sxg_entry_halt;
        netdev->hard_start_xmit = sxg_send_packets;
        netdev->do_ioctl = sxg_ioctl;
        netdev->change_mtu = sxg_change_mtu;
#if XXXTODO
        netdev->set_mac_address = sxg_mac_set_address;
#endif
        netdev->get_stats = sxg_get_stats;
        netdev->set_multicast_list = sxg_mcast_set_list;
        SET_ETHTOOL_OPS(netdev, &sxg_nic_ethtool_ops);

        strcpy(netdev->name, "eth%d");
        /* strcpy(netdev->name, pci_name(pcidev)); */
        if ((err = register_netdev(netdev))) {
                DBG_ERROR("Cannot register net device, aborting. %s\n",
                          netdev->name);
                goto err_out_unmap;
        }

        netif_napi_add(netdev, &adapter->napi,
                       sxg_poll, SXG_NETDEV_WEIGHT);
        DBG_ERROR("sxg: %s addr 0x%lx, irq %d, "
                  "MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
                  netdev->name, netdev->base_addr, pcidev->irq,
                  netdev->dev_addr[0], netdev->dev_addr[1],
                  netdev->dev_addr[2], netdev->dev_addr[3],
                  netdev->dev_addr[4], netdev->dev_addr[5]);

        /* sxg_init_bad: */
        ASSERT(status == FALSE);
        /* sxg_free_adapter(adapter); */

        DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __func__,
                  status, jiffies, smp_processor_id());
        return status;

 err_out_unmap:
        sxg_free_resources(adapter);

 err_out_free_mmio_region_2:

        mmio_start = pci_resource_start(pcidev, 2);
        mmio_len = pci_resource_len(pcidev, 2);
        release_mem_region(mmio_start, mmio_len);

 err_out_free_mmio_region_0:

        mmio_start = pci_resource_start(pcidev, 0);
        mmio_len = pci_resource_len(pcidev, 0);

        release_mem_region(mmio_start, mmio_len);

 err_out_exit_sxg_probe:

        DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __func__, jiffies,
                  smp_processor_id());

        pci_disable_device(pcidev);
        DBG_ERROR("sxg: %s deallocate device\n", __func__);
        kfree(netdev);
        printk("Exit %s, Sxg driver loading failed..\n", __func__);

        return -ENODEV;
}

/*
 * Line-based interrupt routines.
 *
 * sxg_disable_interrupt
 *
 * DisableInterrupt Handler
 *
 * Arguments:
 *
 *	adapter:	Our adapter structure
 *
 * Return Value:
 *	None.
 */
static void sxg_disable_interrupt(struct adapter_t *adapter)
{
        SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DisIntr",
                  adapter, adapter->InterruptsEnabled, 0, 0);
        /* For now, RSS is disabled with line based interrupts */
        ASSERT(adapter->RssEnabled == FALSE);
        ASSERT(adapter->MsiEnabled == FALSE);
        /* Turn off interrupts by writing to the icr register. */
        WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_DISABLE), TRUE);

        adapter->InterruptsEnabled = 0;

        SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDisIntr",
                  adapter, adapter->InterruptsEnabled, 0, 0);
}

/*
 * sxg_enable_interrupt
 *
 * EnableInterrupt Handler
 *
 * Arguments:
 *
 *	adapter:	Our adapter structure
 *
 * Return Value:
 *	None.
 */
static void sxg_enable_interrupt(struct adapter_t *adapter)
{
        SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "EnIntr",
                  adapter, adapter->InterruptsEnabled, 0, 0);
        /* For now, RSS is disabled with line based interrupts */
        ASSERT(adapter->RssEnabled == FALSE);
        ASSERT(adapter->MsiEnabled == FALSE);
        /* Turn on interrupts by writing to the icr register. */
        WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE);

        adapter->InterruptsEnabled = 1;

        SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XEnIntr",
                  adapter, 0, 0, 0);
}

/*
 * sxg_isr - Process a line-based interrupt
 *
 * Arguments:
 *	Context		- Our adapter structure
 *	QueueDefault	- Output parameter to queue to default CPU
 *	TargetCpus	- Output bitmap to schedule DPC's
 *
 * Return Value: TRUE if our interrupt
 */
1076static irqreturn_t sxg_isr(int irq, void *dev_id)
1077{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301078 struct net_device *dev = (struct net_device *) dev_id;
J.R. Mauro73b07062008-10-28 18:42:02 -04001079 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001080
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05301081 if(adapter->state != ADAPT_UP)
1082 return IRQ_NONE;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001083 adapter->Stats.NumInts++;
1084 if (adapter->Isr[0] == 0) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301085 /*
1086 * The SLIC driver used to experience a number of spurious
1087 * interrupts due to the delay associated with the masking of
1088 * the interrupt (we'd bounce back in here). If we see that
1089 * again with Sahara,add a READ_REG of the Icr register after
1090 * the WRITE_REG below.
1091 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001092 adapter->Stats.FalseInts++;
1093 return IRQ_NONE;
1094 }
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301095 /*
1096 * Move the Isr contents and clear the value in
1097 * shared memory, and mask interrupts
1098 */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301099 /* ASSERT(adapter->IsrDpcsPending == 0); */
J.R. Maurob243c4a2008-10-20 19:28:58 -04001100#if XXXTODO /* RSS Stuff */
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301101 /*
1102 * If RSS is enabled and the ISR specifies SXG_ISR_EVENT, then
1103 * schedule DPC's based on event queues.
1104 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001105 if (adapter->RssEnabled && (adapter->IsrCopy[0] & SXG_ISR_EVENT)) {
1106 for (i = 0;
1107 i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount;
1108 i++) {
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301109 struct sxg_event_ring *EventRing =
1110 &adapter->EventRings[i];
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301111 struct sxg_event *Event =
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001112 &EventRing->Ring[adapter->NextEvent[i]];
J.R. Mauro5c7514e2008-10-05 20:38:52 -04001113 unsigned char Cpu =
1114 adapter->RssSystemInfo->RssIdToCpu[i];
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001115 if (Event->Status & EVENT_STATUS_VALID) {
1116 adapter->IsrDpcsPending++;
1117 CpuMask |= (1 << Cpu);
1118 }
1119 }
1120 }
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301121 /*
1122 * Now, either schedule the CPUs specified by the CpuMask,
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301123 * or queue default
1124 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001125 if (CpuMask) {
1126 *QueueDefault = FALSE;
1127 } else {
1128 adapter->IsrDpcsPending = 1;
1129 *QueueDefault = TRUE;
1130 }
1131 *TargetCpus = CpuMask;
1132#endif
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301133 sxg_interrupt(adapter);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001134
1135 return IRQ_HANDLED;
1136}
1137
static void sxg_interrupt(struct adapter_t *adapter)
{
        WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE);

        if (netif_rx_schedule_prep(&adapter->napi)) {
                __netif_rx_schedule(&adapter->napi);
        }
}

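/*
 * sxg_handle_interrupt - NAPI worker: snapshot and clear the shared ISR,
 * drain the event queue within the given budget, then process the ISR
 * bits via sxg_process_isr().
 */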
static void sxg_handle_interrupt(struct adapter_t *adapter, int *work_done,
                                 int budget)
{
        /* unsigned char RssId = 0; */
        u32 NewIsr;
        int sxg_napi_continue = 1;

        SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "HndlIntr",
                  adapter, adapter->IsrCopy[0], 0, 0);
        /* For now, RSS is disabled with line based interrupts */
        ASSERT(adapter->RssEnabled == FALSE);
        ASSERT(adapter->MsiEnabled == FALSE);

        adapter->IsrCopy[0] = adapter->Isr[0];
        adapter->Isr[0] = 0;

        /* Always process the event queue. */
        while (sxg_napi_continue) {
                sxg_process_event_queue(adapter,
                                (adapter->RssEnabled ? /* RssId */ 0 : 0),
                                &sxg_napi_continue, work_done, budget);
        }

#if XXXTODO			/* RSS stuff */
        if (--adapter->IsrDpcsPending) {
                /* We're done. */
                ASSERT(adapter->RssEnabled);
                SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DPCsPend",
                          adapter, 0, 0, 0);
                return;
        }
#endif
        /* Last (or only) DPC processes the ISR and clears the interrupt. */
        NewIsr = sxg_process_isr(adapter, 0);
        /* Reenable interrupts */
        adapter->IsrCopy[0] = 0;
        SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ClearIsr",
                  adapter, NewIsr, 0, 0);

        SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XHndlInt",
                  adapter, 0, 0, 0);
}
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301189static int sxg_poll(struct napi_struct *napi, int budget)
1190{
1191 struct adapter_t *adapter = container_of(napi, struct adapter_t, napi);
1192 int work_done = 0;
1193
1194 sxg_handle_interrupt(adapter, &work_done, budget);
1195
1196 if (work_done < budget) {
1197 netif_rx_complete(napi);
1198 WRITE_REG(adapter->UcodeRegs[0].Isr, 0, TRUE);
1199 }
1200
1201 return work_done;
1202}
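/*
 * A minimal sketch of how sxg_poll would be hooked up at probe time
 * (an assumption -- the registration happens outside this excerpt, and
 * the weight of 64 is only illustrative):
 *
 *	netif_napi_add(netdev, &adapter->napi, sxg_poll, 64);
 *
 * The napi_enable()/napi_disable() calls in sxg_entry_open() and
 * sxg_entry_halt() below then gate whether this poll routine can be
 * scheduled.
 */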
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001203
1204/*
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001205 * sxg_process_isr - Process an interrupt. Called from the line-based and
1206 * message based interrupt DPC routines
1207 *
1208 * Arguments:
1209 * adapter - Our adapter structure
1210 * Queue - The ISR that needs processing
1211 *
1212 * Return Value:
1213 * None
1214 */
J.R. Mauro73b07062008-10-28 18:42:02 -04001215static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001216{
1217 u32 Isr = adapter->IsrCopy[MessageId];
1218 u32 NewIsr = 0;
1219
1220 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ProcIsr",
1221 adapter, Isr, 0, 0);
1222
J.R. Maurob243c4a2008-10-20 19:28:58 -04001223 /* Error */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001224 if (Isr & SXG_ISR_ERR) {
1225 if (Isr & SXG_ISR_PDQF) {
1226 adapter->Stats.PdqFull++;
Harvey Harrisone88bd232008-10-17 14:46:10 -07001227 DBG_ERROR("%s: SXG_ISR_ERR PDQF!!\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001228 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001229 /* No host buffer */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001230 if (Isr & SXG_ISR_RMISS) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301231 /*
1232 * There is a bunch of code in the SLIC driver which
1233 * attempts to process more receive events per DPC
1234			 * if we start to fall behind. We'll probably
1235 * need to do something similar here, but hold
1236 * off for now. I don't want to make the code more
1237 * complicated than strictly needed.
1238 */
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05301239 adapter->stats.rx_missed_errors++;
Mithlesh Thukral54aed112009-01-19 20:27:17 +05301240			if (adapter->stats.rx_missed_errors < 5) {
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001241 DBG_ERROR("%s: SXG_ISR_ERR RMISS!!\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07001242 __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001243 }
1244 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001245 /* Card crash */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001246 if (Isr & SXG_ISR_DEAD) {
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301247 /*
1248 * Set aside the crash info and set the adapter state
1249 * to RESET
1250 */
1251 adapter->CrashCpu = (unsigned char)
1252 ((Isr & SXG_ISR_CPU) >> SXG_ISR_CPU_SHIFT);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001253 adapter->CrashLocation = (ushort) (Isr & SXG_ISR_CRASH);
1254 adapter->Dead = TRUE;
Harvey Harrisone88bd232008-10-17 14:46:10 -07001255 DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __func__,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001256 adapter->CrashLocation, adapter->CrashCpu);
1257 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001258 /* Event ring full */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001259 if (Isr & SXG_ISR_ERFULL) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301260 /*
1261 * Same issue as RMISS, really. This means the
1262 * host is falling behind the card. Need to increase
1263 * event ring size, process more events per interrupt,
1264 * and/or reduce/remove interrupt aggregation.
1265 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001266 adapter->Stats.EventRingFull++;
1267 DBG_ERROR("%s: SXG_ISR_ERR EVENT RING FULL!!\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07001268 __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001269 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001270 /* Transmit drop - no DRAM buffers or XMT error */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001271 if (Isr & SXG_ISR_XDROP) {
Harvey Harrisone88bd232008-10-17 14:46:10 -07001272 DBG_ERROR("%s: SXG_ISR_ERR XDROP!!\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001273 }
1274 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001275 /* Slowpath send completions */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001276 if (Isr & SXG_ISR_SPSEND) {
Mithlesh Thukralc5e5cf52009-02-06 19:31:40 +05301277 sxg_complete_slow_send(adapter);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001278 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001279 /* Dump */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001280 if (Isr & SXG_ISR_UPC) {
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301281 /* Maybe change when debug is added.. */
Mithlesh Thukral54aed112009-01-19 20:27:17 +05301282		/* ASSERT(adapter->DumpCmdRunning); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001283 adapter->DumpCmdRunning = FALSE;
1284 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001285 /* Link event */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001286 if (Isr & SXG_ISR_LINK) {
1287 sxg_link_event(adapter);
1288 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001289 /* Debug - breakpoint hit */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001290 if (Isr & SXG_ISR_BREAK) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301291 /*
1292 * At the moment AGDB isn't written to support interactive
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301293 * debug sessions. When it is, this interrupt will be used to
1294 * signal AGDB that it has hit a breakpoint. For now, ASSERT.
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301295 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001296 ASSERT(0);
1297 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001298 /* Heartbeat response */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001299 if (Isr & SXG_ISR_PING) {
1300 adapter->PingOutstanding = FALSE;
1301 }
1302 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XProcIsr",
1303 adapter, Isr, NewIsr, 0);
1304
1305 return (NewIsr);
1306}
1307
1308/*
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001309 * sxg_process_event_queue - Process our event queue
1310 *
1311 * Arguments:
1312 * - adapter - Adapter structure
1313 * - RssId - The event queue requiring processing
1314 *
1315 * Return Value:
1316 * None.
1317 */
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301318static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId,
1319 int *sxg_napi_continue, int *work_done, int budget)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001320{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301321 struct sxg_event_ring *EventRing = &adapter->EventRings[RssId];
1322 struct sxg_event *Event = &EventRing->Ring[adapter->NextEvent[RssId]];
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001323 u32 EventsProcessed = 0, Batches = 0;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001324 struct sk_buff *skb;
1325#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1326 struct sk_buff *prev_skb = NULL;
1327 struct sk_buff *IndicationList[SXG_RCV_ARRAYSIZE];
1328 u32 Index;
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301329 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001330#endif
1331 u32 ReturnStatus = 0;
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05301332 int sxg_rcv_data_buffers = SXG_RCV_DATA_BUFFERS;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001333
1334 ASSERT((adapter->State == SXG_STATE_RUNNING) ||
1335 (adapter->State == SXG_STATE_PAUSING) ||
1336 (adapter->State == SXG_STATE_PAUSED) ||
1337 (adapter->State == SXG_STATE_HALTING));
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301338 /*
1339 * We may still have unprocessed events on the queue if
1340 * the card crashed. Don't process them.
1341 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001342 if (adapter->Dead) {
1343 return (0);
1344 }
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301345 /*
1346 * In theory there should only be a single processor that
1347	 * accesses this queue, and only at interrupt-DPC time. So
1348 * we shouldn't need a lock for any of this.
1349 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001350 while (Event->Status & EVENT_STATUS_VALID) {
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301351 (*sxg_napi_continue) = 1;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001352 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "Event",
1353 Event, Event->Code, Event->Status,
1354 adapter->NextEvent);
1355 switch (Event->Code) {
1356 case EVENT_CODE_BUFFERS:
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301357 /* struct sxg_ring_info Head & Tail == unsigned char */
1358 ASSERT(!(Event->CommandIndex & 0xFF00));
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001359 sxg_complete_descriptor_blocks(adapter,
1360 Event->CommandIndex);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001361 break;
1362 case EVENT_CODE_SLOWRCV:
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301363 (*work_done)++;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001364 --adapter->RcvBuffersOnCard;
1365 if ((skb = sxg_slow_receive(adapter, Event))) {
1366 u32 rx_bytes;
1367#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
J.R. Maurob243c4a2008-10-20 19:28:58 -04001368 /* Add it to our indication list */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001369 SXG_ADD_RCV_PACKET(adapter, skb, prev_skb,
1370 IndicationList, num_skbs);
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301371 /*
1372				 * On Linux we just pass each skb up to the
1373				 * protocol layer at this point; there is no
1374				 * indication-list capability.
1375 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001376#else
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301377 /* CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); */
1378 /* (rcvbuf->length & IRHDDR_FLEN_MSK); */
1379 rx_bytes = Event->Length;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001380 adapter->stats.rx_packets++;
1381 adapter->stats.rx_bytes += rx_bytes;
1382#if SXG_OFFLOAD_IP_CHECKSUM
1383 skb->ip_summed = CHECKSUM_UNNECESSARY;
1384#endif
1385 skb->dev = adapter->netdev;
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301386 netif_receive_skb(skb);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001387#endif
1388 }
1389 break;
1390 default:
1391 DBG_ERROR("%s: ERROR Invalid EventCode %d\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07001392 __func__, Event->Code);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301393 /* ASSERT(0); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001394 }
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301395 /*
1396 * See if we need to restock card receive buffers.
1397 * There are two things to note here:
1398 * First - This test is not SMP safe. The
1399 * adapter->BuffersOnCard field is protected via atomic
1400 * interlocked calls, but we do not protect it with respect
1401 * to these tests. The only way to do that is with a lock,
1402 * and I don't want to grab a lock every time we adjust the
1403 * BuffersOnCard count. Instead, we allow the buffer
1404 * replenishment to be off once in a while. The worst that
1405		 * can happen is the card is given one more or one fewer descriptor
1406		 * block than the arbitrary value we've chosen. No big deal.
1407 * In short DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard
1408 * is adjusted.
1409 * Second - We expect this test to rarely
1410 * evaluate to true. We attempt to refill descriptor blocks
1411 * as they are returned to us (sxg_complete_descriptor_blocks)
1412		 * so the only time this should evaluate to true is when
1413 * sxg_complete_descriptor_blocks failed to allocate
1414 * receive buffers.
1415 */
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05301416 if (adapter->JumboEnabled)
1417 sxg_rcv_data_buffers = SXG_JUMBO_RCV_DATA_BUFFERS;
1418
1419 if (adapter->RcvBuffersOnCard < sxg_rcv_data_buffers) {
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001420 sxg_stock_rcv_buffers(adapter);
1421 }
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301422 /*
1423 * It's more efficient to just set this to zero.
1424 * But clearing the top bit saves potential debug info...
1425 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001426 Event->Status &= ~EVENT_STATUS_VALID;
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301427 /* Advance to the next event */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001428 SXG_ADVANCE_INDEX(adapter->NextEvent[RssId], EVENT_RING_SIZE);
1429 Event = &EventRing->Ring[adapter->NextEvent[RssId]];
1430 EventsProcessed++;
1431 if (EventsProcessed == EVENT_RING_BATCH) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04001432 /* Release a batch of events back to the card */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001433 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1434 EVENT_RING_BATCH, FALSE);
1435 EventsProcessed = 0;
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301436 /*
1437 * If we've processed our batch limit, break out of the
1438 * loop and return SXG_ISR_EVENT to arrange for us to
1439 * be called again
1440 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001441 if (Batches++ == EVENT_BATCH_LIMIT) {
1442 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1443 TRACE_NOISY, "EvtLimit", Batches,
1444 adapter->NextEvent, 0, 0);
1445 ReturnStatus = SXG_ISR_EVENT;
1446 break;
1447 }
1448 }
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301449 if (*work_done >= budget) {
1450 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1451 EventsProcessed, FALSE);
1452 EventsProcessed = 0;
1453 (*sxg_napi_continue) = 0;
1454 break;
1455 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001456 }
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301457 if (!(Event->Status & EVENT_STATUS_VALID))
1458 (*sxg_napi_continue) = 0;
1459
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001460#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
J.R. Maurob243c4a2008-10-20 19:28:58 -04001461 /* Indicate any received dumb-nic frames */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001462 SXG_INDICATE_PACKETS(adapter, IndicationList, num_skbs);
1463#endif
J.R. Maurob243c4a2008-10-20 19:28:58 -04001464 /* Release events back to the card. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001465 if (EventsProcessed) {
1466 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1467 EventsProcessed, FALSE);
1468 }
1469 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XPrcEvnt",
1470 Batches, EventsProcessed, adapter->NextEvent, num_skbs);
1471
1472 return (ReturnStatus);
1473}
1474
1475/*
1476 * sxg_complete_slow_send - Complete slowpath or dumb-nic sends
1477 *
1478 * Arguments -
1479 * adapter - A pointer to our adapter structure
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001480 * Return
1481 * None
1482 */
Mithlesh Thukralc5e5cf52009-02-06 19:31:40 +05301483static void sxg_complete_slow_send(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001484{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301485 struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
1486 struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
J.R. Mauro5c7514e2008-10-05 20:38:52 -04001487 u32 *ContextType;
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301488 struct sxg_cmd *XmtCmd;
Mithlesh Thukral54aed112009-01-19 20:27:17 +05301489 unsigned long flags = 0;
1490 unsigned long sgl_flags = 0;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301491 unsigned int processed_count = 0;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001492
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301493 /*
1494 * NOTE - This lock is dropped and regrabbed in this loop.
1495	 * This means two different processors can both be running
1496 * through this loop. Be *very* careful.
1497 */
Mithlesh Thukralc5e5cf52009-02-06 19:31:40 +05301498 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301499
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001500 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds",
1501 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1502
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301503 while ((XmtRingInfo->Tail != *adapter->XmtRingZeroIndex)
1504 && processed_count++ < SXG_COMPLETE_SLOW_SEND_LIMIT) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301505 /*
1506 * Locate the current Cmd (ring descriptor entry), and
1507 * associated SGL, and advance the tail
1508 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001509 SXG_RETURN_CMD(XmtRing, XmtRingInfo, XmtCmd, ContextType);
1510 ASSERT(ContextType);
1511 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1512 XmtRingInfo->Head, XmtRingInfo->Tail, XmtCmd, 0);
J.R. Maurob243c4a2008-10-20 19:28:58 -04001513 /* Clear the SGL field. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001514 XmtCmd->Sgl = 0;
1515
1516 switch (*ContextType) {
1517 case SXG_SGL_DUMB:
1518 {
1519 struct sk_buff *skb;
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301520 struct sxg_scatter_gather *SxgSgl =
1521 (struct sxg_scatter_gather *)ContextType;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301522 dma64_addr_t FirstSgeAddress;
1523 u32 FirstSgeLength;
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05301524
J.R. Maurob243c4a2008-10-20 19:28:58 -04001525 /* Dumb-nic send. Command context is the dumb-nic SGL */
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05301527				skb = SxgSgl->DumbPacket;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301528 FirstSgeAddress = XmtCmd->Buffer.FirstSgeAddress;
1529 FirstSgeLength = XmtCmd->Buffer.FirstSgeLength;
J.R. Maurob243c4a2008-10-20 19:28:58 -04001530 /* Complete the send */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001531 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1532 TRACE_IMPORTANT, "DmSndCmp", skb, 0,
1533 0, 0);
1534 ASSERT(adapter->Stats.XmtQLen);
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301535 /*
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301536 * Now drop the lock and complete the send
1537 * back to Microsoft. We need to drop the lock
1538 * because Microsoft can come back with a
1539 * chimney send, which results in a double trip
1540 * in SxgTcpOuput
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301541 */
Mithlesh Thukralc5e5cf52009-02-06 19:31:40 +05301542 spin_unlock_irqrestore(
1543 &adapter->XmtZeroLock, flags);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301544
1545 SxgSgl->DumbPacket = NULL;
1546 SXG_COMPLETE_DUMB_SEND(adapter, skb,
1547 FirstSgeAddress,
1548 FirstSgeLength);
Mithlesh Thukralc5e5cf52009-02-06 19:31:40 +05301549 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL);
J.R. Maurob243c4a2008-10-20 19:28:58 -04001550 /* and reacquire.. */
Mithlesh Thukralc5e5cf52009-02-06 19:31:40 +05301551 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001552 }
1553 break;
1554 default:
1555 ASSERT(0);
1556 }
1557 }
Mithlesh Thukralc5e5cf52009-02-06 19:31:40 +05301558 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001559 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1560 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1561}
1562
1563/*
1564 * sxg_slow_receive
1565 *
1566 * Arguments -
1567 * adapter - A pointer to our adapter structure
1568 * Event - Receive event
1569 *
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301570 * Return - skb
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001571 */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301572static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
1573 struct sxg_event *Event)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001574{
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05301575 u32 BufferSize = adapter->ReceiveBufferSize;
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301576 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001577 struct sk_buff *Packet;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301578 static int read_counter = 0;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001579
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301580 RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *) Event->HostHandle;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301581	if (read_counter++ & 0x100)
1582 {
1583 sxg_collect_statistics(adapter);
1584 read_counter = 0;
1585 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001586 ASSERT(RcvDataBufferHdr);
1587 ASSERT(RcvDataBufferHdr->State == SXG_BUFFER_ONCARD);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001588 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event,
1589 RcvDataBufferHdr, RcvDataBufferHdr->State,
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05301590 /*RcvDataBufferHdr->VirtualAddress*/ 0);
J.R. Maurob243c4a2008-10-20 19:28:58 -04001591 /* Drop rcv frames in non-running state */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001592 switch (adapter->State) {
1593 case SXG_STATE_RUNNING:
1594 break;
1595 case SXG_STATE_PAUSING:
1596 case SXG_STATE_PAUSED:
1597 case SXG_STATE_HALTING:
1598 goto drop;
1599 default:
1600 ASSERT(0);
1601 goto drop;
1602 }
1603
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301604 /*
1605 * memcpy(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1606 * RcvDataBufferHdr->VirtualAddress, Event->Length);
1607 */
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05301608
J.R. Maurob243c4a2008-10-20 19:28:58 -04001609 /* Change buffer state to UPSTREAM */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001610 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
1611 if (Event->Status & EVENT_STATUS_RCVERR) {
1612 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvError",
1613 Event, Event->Status, Event->HostHandle, 0);
J.R. Maurob243c4a2008-10-20 19:28:58 -04001614 /* XXXTODO - Remove this print later */
J.R. Mauro5c7514e2008-10-05 20:38:52 -04001615 DBG_ERROR("SXG: Receive error %x\n", *(u32 *)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001616 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr));
J.R. Mauro5c7514e2008-10-05 20:38:52 -04001617 sxg_process_rcv_error(adapter, *(u32 *)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001618 SXG_RECEIVE_DATA_LOCATION
1619 (RcvDataBufferHdr));
1620 goto drop;
1621 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001622#if XXXTODO /* VLAN stuff */
1623 /* If there's a VLAN tag, extract it and validate it */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301624 if (((struct ether_header *)
1625 (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))->EtherType
1626 == ETHERTYPE_VLAN) {
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001627 if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) !=
1628 STATUS_SUCCESS) {
1629 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY,
1630 "BadVlan", Event,
1631 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1632 Event->Length, 0);
1633 goto drop;
1634 }
1635 }
1636#endif
J.R. Maurob243c4a2008-10-20 19:28:58 -04001637 /* Dumb-nic frame. See if it passes our mac filter and update stats */
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301638
Mithlesh Thukralb040b072009-01-28 07:08:11 +05301639 if (!sxg_mac_filter(adapter,
1640 (struct ether_header *)(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)),
1641 Event->Length)) {
1642 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvFiltr",
1643 Event, SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1644 Event->Length, 0);
1645 goto drop;
1646 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001647
1648 Packet = RcvDataBufferHdr->SxgDumbRcvPacket;
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05301649 SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event);
1650 Packet->protocol = eth_type_trans(Packet, adapter->netdev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001651
1652 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv",
1653 RcvDataBufferHdr, Packet, Event->Length, 0);
J.R. Maurob243c4a2008-10-20 19:28:58 -04001654 /* Lastly adjust the receive packet length. */
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05301655 RcvDataBufferHdr->SxgDumbRcvPacket = NULL;
Mithlesh Thukral54aed112009-01-19 20:27:17 +05301656 RcvDataBufferHdr->PhysicalAddress = (dma_addr_t)NULL;
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05301657 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize);
1658 if (RcvDataBufferHdr->skb)
1659 {
1660 spin_lock(&adapter->RcvQLock);
1661 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301662		/* adapter->RcvBuffersOnCard++; */
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05301663 spin_unlock(&adapter->RcvQLock);
1664 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001665 return (Packet);
1666
1667 drop:
1668 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DropRcv",
1669 RcvDataBufferHdr, Event->Length, 0, 0);
Mithlesh Thukral54aed112009-01-19 20:27:17 +05301670 adapter->stats.rx_dropped++;
1671	/* adapter->Stats.RcvDiscards++; */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001672 spin_lock(&adapter->RcvQLock);
1673 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
1674 spin_unlock(&adapter->RcvQLock);
1675 return (NULL);
1676}
1677
1678/*
1679 * sxg_process_rcv_error - process receive error and update
1680 * stats
1681 *
1682 * Arguments:
1683 * adapter - Adapter structure
1684 * ErrorStatus - 4-byte receive error status
1685 *
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301686 * Return Value : None
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001687 */
J.R. Mauro73b07062008-10-28 18:42:02 -04001688static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001689{
1690 u32 Error;
1691
Mithlesh Thukral54aed112009-01-19 20:27:17 +05301692 adapter->stats.rx_errors++;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001693
1694 if (ErrorStatus & SXG_RCV_STATUS_TRANSPORT_ERROR) {
1695 Error = ErrorStatus & SXG_RCV_STATUS_TRANSPORT_MASK;
1696 switch (Error) {
1697 case SXG_RCV_STATUS_TRANSPORT_CSUM:
1698 adapter->Stats.TransportCsum++;
1699 break;
1700 case SXG_RCV_STATUS_TRANSPORT_UFLOW:
1701 adapter->Stats.TransportUflow++;
1702 break;
1703 case SXG_RCV_STATUS_TRANSPORT_HDRLEN:
1704 adapter->Stats.TransportHdrLen++;
1705 break;
1706 }
1707 }
1708 if (ErrorStatus & SXG_RCV_STATUS_NETWORK_ERROR) {
1709 Error = ErrorStatus & SXG_RCV_STATUS_NETWORK_MASK;
1710 switch (Error) {
1711 case SXG_RCV_STATUS_NETWORK_CSUM:
1712 adapter->Stats.NetworkCsum++;
1713 break;
1714 case SXG_RCV_STATUS_NETWORK_UFLOW:
1715 adapter->Stats.NetworkUflow++;
1716 break;
1717 case SXG_RCV_STATUS_NETWORK_HDRLEN:
1718 adapter->Stats.NetworkHdrLen++;
1719 break;
1720 }
1721 }
1722 if (ErrorStatus & SXG_RCV_STATUS_PARITY) {
1723 adapter->Stats.Parity++;
1724 }
1725 if (ErrorStatus & SXG_RCV_STATUS_LINK_ERROR) {
1726 Error = ErrorStatus & SXG_RCV_STATUS_LINK_MASK;
1727 switch (Error) {
1728 case SXG_RCV_STATUS_LINK_PARITY:
1729 adapter->Stats.LinkParity++;
1730 break;
1731 case SXG_RCV_STATUS_LINK_EARLY:
1732 adapter->Stats.LinkEarly++;
1733 break;
1734 case SXG_RCV_STATUS_LINK_BUFOFLOW:
1735 adapter->Stats.LinkBufOflow++;
1736 break;
1737 case SXG_RCV_STATUS_LINK_CODE:
1738 adapter->Stats.LinkCode++;
1739 break;
1740 case SXG_RCV_STATUS_LINK_DRIBBLE:
1741 adapter->Stats.LinkDribble++;
1742 break;
1743 case SXG_RCV_STATUS_LINK_CRC:
1744 adapter->Stats.LinkCrc++;
1745 break;
1746 case SXG_RCV_STATUS_LINK_OFLOW:
1747 adapter->Stats.LinkOflow++;
1748 break;
1749 case SXG_RCV_STATUS_LINK_UFLOW:
1750 adapter->Stats.LinkUflow++;
1751 break;
1752 }
1753 }
1754}
1755
1756/*
1757 * sxg_mac_filter
1758 *
1759 * Arguments:
1760 * adapter - Adapter structure
1761 * pether - Ethernet header
1762 * length - Frame length
1763 *
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301764 * Return Value : TRUE if the frame is to be allowed
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001765 */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301766static bool sxg_mac_filter(struct adapter_t *adapter,
1767 struct ether_header *EtherHdr, ushort length)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001768{
1769 bool EqualAddr;
Mithlesh Thukralb040b072009-01-28 07:08:11 +05301770 struct net_device *dev = adapter->netdev;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001771
1772 if (SXG_MULTICAST_PACKET(EtherHdr)) {
1773 if (SXG_BROADCAST_PACKET(EtherHdr)) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04001774 /* broadcast */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001775 if (adapter->MacFilter & MAC_BCAST) {
1776 adapter->Stats.DumbRcvBcastPkts++;
1777 adapter->Stats.DumbRcvBcastBytes += length;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001778 return (TRUE);
1779 }
1780 } else {
J.R. Maurob243c4a2008-10-20 19:28:58 -04001781 /* multicast */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001782 if (adapter->MacFilter & MAC_ALLMCAST) {
1783 adapter->Stats.DumbRcvMcastPkts++;
1784 adapter->Stats.DumbRcvMcastBytes += length;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001785 return (TRUE);
1786 }
1787 if (adapter->MacFilter & MAC_MCAST) {
Mithlesh Thukralb040b072009-01-28 07:08:11 +05301788 struct dev_mc_list *mclist = dev->mc_list;
1789 while (mclist) {
1790 ETHER_EQ_ADDR(mclist->da_addr,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001791 EtherHdr->ether_dhost,
1792 EqualAddr);
1793 if (EqualAddr) {
1794 adapter->Stats.
1795 DumbRcvMcastPkts++;
1796 adapter->Stats.
1797 DumbRcvMcastBytes += length;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001798 return (TRUE);
1799 }
Mithlesh Thukralb040b072009-01-28 07:08:11 +05301800 mclist = mclist->next;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001801 }
1802 }
1803 }
1804 } else if (adapter->MacFilter & MAC_DIRECTED) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301805 /*
1806 * Not broadcast or multicast. Must be directed at us or
1807 * the card is in promiscuous mode. Either way, consider it
1808 * ours if MAC_DIRECTED is set
1809 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001810 adapter->Stats.DumbRcvUcastPkts++;
1811 adapter->Stats.DumbRcvUcastBytes += length;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001812 return (TRUE);
1813 }
1814 if (adapter->MacFilter & MAC_PROMISC) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04001815 /* Whatever it is, keep it. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001816 return (TRUE);
1817 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001818 return (FALSE);
1819}
Mithlesh Thukralb040b072009-01-28 07:08:11 +05301820
J.R. Mauro73b07062008-10-28 18:42:02 -04001821static int sxg_register_interrupt(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001822{
1823 if (!adapter->intrregistered) {
1824 int retval;
1825
1826 DBG_ERROR
1827 ("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x] %x\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07001828 __func__, adapter, adapter->netdev->irq, NR_IRQS);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001829
J.R. Mauro5c7514e2008-10-05 20:38:52 -04001830 spin_unlock_irqrestore(&sxg_global.driver_lock,
1831 sxg_global.flags);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001832
1833 retval = request_irq(adapter->netdev->irq,
1834 &sxg_isr,
1835 IRQF_SHARED,
1836 adapter->netdev->name, adapter->netdev);
1837
1838 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
1839
1840 if (retval) {
1841 DBG_ERROR("sxg: request_irq (%s) FAILED [%x]\n",
1842 adapter->netdev->name, retval);
1843 return (retval);
1844 }
1845 adapter->intrregistered = 1;
1846 adapter->IntRegistered = TRUE;
J.R. Maurob243c4a2008-10-20 19:28:58 -04001847 /* Disable RSS with line-based interrupts */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001848 adapter->MsiEnabled = FALSE;
1849 adapter->RssEnabled = FALSE;
1850 DBG_ERROR("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x]\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07001851 __func__, adapter, adapter->netdev->irq);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001852 }
1853 return (STATUS_SUCCESS);
1854}
1855
J.R. Mauro73b07062008-10-28 18:42:02 -04001856static void sxg_deregister_interrupt(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001857{
Harvey Harrisone88bd232008-10-17 14:46:10 -07001858 DBG_ERROR("sxg: %s ENTER adapter[%p]\n", __func__, adapter);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001859#if XXXTODO
1860 slic_init_cleanup(adapter);
1861#endif
1862 memset(&adapter->stats, 0, sizeof(struct net_device_stats));
1863 adapter->error_interrupts = 0;
1864 adapter->rcv_interrupts = 0;
1865 adapter->xmit_interrupts = 0;
1866 adapter->linkevent_interrupts = 0;
1867 adapter->upr_interrupts = 0;
1868 adapter->num_isrs = 0;
1869 adapter->xmit_completes = 0;
1870 adapter->rcv_broadcasts = 0;
1871 adapter->rcv_multicasts = 0;
1872 adapter->rcv_unicasts = 0;
Harvey Harrisone88bd232008-10-17 14:46:10 -07001873 DBG_ERROR("sxg: %s EXIT\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001874}
1875
1876/*
1877 * sxg_if_init
1878 *
1879 * Perform initialization of our sxg interface.
1880 *
1881 */
J.R. Mauro73b07062008-10-28 18:42:02 -04001882static int sxg_if_init(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001883{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301884 struct net_device *dev = adapter->netdev;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001885 int status = 0;
1886
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05301887 DBG_ERROR("sxg: %s (%s) ENTER states[%d:%d] flags[%x]\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07001888 __func__, adapter->netdev->name,
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05301889 adapter->state,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001890 adapter->linkstate, dev->flags);
1891
1892 /* adapter should be down at this point */
1893 if (adapter->state != ADAPT_DOWN) {
1894 DBG_ERROR("sxg_if_init adapter->state != ADAPT_DOWN\n");
1895 return (-EIO);
1896 }
1897 ASSERT(adapter->linkstate == LINK_DOWN);
1898
1899 adapter->devflags_prev = dev->flags;
Mithlesh Thukralb040b072009-01-28 07:08:11 +05301900 adapter->MacFilter = MAC_DIRECTED;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001901 if (dev->flags) {
Harvey Harrisone88bd232008-10-17 14:46:10 -07001902 DBG_ERROR("sxg: %s (%s) Set MAC options: ", __func__,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001903 adapter->netdev->name);
1904 if (dev->flags & IFF_BROADCAST) {
Mithlesh Thukralb040b072009-01-28 07:08:11 +05301905 adapter->MacFilter |= MAC_BCAST;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001906 DBG_ERROR("BCAST ");
1907 }
1908 if (dev->flags & IFF_PROMISC) {
Mithlesh Thukralb040b072009-01-28 07:08:11 +05301909 adapter->MacFilter |= MAC_PROMISC;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001910 DBG_ERROR("PROMISC ");
1911 }
1912 if (dev->flags & IFF_ALLMULTI) {
Mithlesh Thukralb040b072009-01-28 07:08:11 +05301913 adapter->MacFilter |= MAC_ALLMCAST;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001914 DBG_ERROR("ALL_MCAST ");
1915 }
1916 if (dev->flags & IFF_MULTICAST) {
Mithlesh Thukralb040b072009-01-28 07:08:11 +05301917 adapter->MacFilter |= MAC_MCAST;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001918 DBG_ERROR("MCAST ");
1919 }
1920 DBG_ERROR("\n");
1921 }
1922 status = sxg_register_interrupt(adapter);
1923 if (status != STATUS_SUCCESS) {
1924 DBG_ERROR("sxg_if_init: sxg_register_interrupt FAILED %x\n",
1925 status);
1926 sxg_deregister_interrupt(adapter);
1927 return (status);
1928 }
1929
1930 adapter->state = ADAPT_UP;
1931
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301932 /* clear any pending events, then enable interrupts */
Harvey Harrisone88bd232008-10-17 14:46:10 -07001933 DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001934
1935 return (STATUS_SUCCESS);
1936}
1937
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301938void sxg_set_interrupt_aggregation(struct adapter_t *adapter)
1939{
1940 /*
1941 * Top bit disables aggregation on xmt (SXG_AGG_XMT_DISABLE).
1942 * Make sure Max is less than 0x8000.
1943 */
1944 adapter->max_aggregation = SXG_MAX_AGG_DEFAULT;
1945 adapter->min_aggregation = SXG_MIN_AGG_DEFAULT;
1946 WRITE_REG(adapter->UcodeRegs[0].Aggregation,
1947 ((adapter->max_aggregation << SXG_MAX_AGG_SHIFT) |
1948 adapter->min_aggregation),
1949 TRUE);
1950}
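/*
 * A worked example of the register packing above, with made-up numbers
 * (the real SXG_MAX_AGG_DEFAULT, SXG_MIN_AGG_DEFAULT and
 * SXG_MAX_AGG_SHIFT values live in the sxg headers): if max_aggregation
 * were 0x0200, min_aggregation 0x0010 and SXG_MAX_AGG_SHIFT 16, the word
 * written to the Aggregation register would be
 *
 *	(0x0200 << 16) | 0x0010 == 0x02000010
 *
 * Keeping max below 0x8000 leaves the top bit (SXG_AGG_XMT_DISABLE)
 * clear, so transmit aggregation stays enabled.
 */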
1951
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301952static int sxg_entry_open(struct net_device *dev)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001953{
J.R. Mauro73b07062008-10-28 18:42:02 -04001954 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001955 int status;
Mithlesh Thukral0d414722009-01-19 20:29:59 +05301956 static int turn;
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05301957 int sxg_initial_rcv_data_buffers = SXG_INITIAL_RCV_DATA_BUFFERS;
1958 int i;
1959
1960 if (adapter->JumboEnabled == TRUE) {
1961 sxg_initial_rcv_data_buffers =
1962 SXG_INITIAL_JUMBO_RCV_DATA_BUFFERS;
1963 SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo,
1964 SXG_JUMBO_RCV_RING_SIZE);
1965 }
1966
1967 /*
1968 * Allocate receive data buffers. We allocate a block of buffers and
1969 * a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK
1970 */
1971
1972 for (i = 0; i < sxg_initial_rcv_data_buffers;
1973 i += SXG_RCV_DESCRIPTORS_PER_BLOCK)
1974 {
1975 status = sxg_allocate_buffer_memory(adapter,
1976 SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
1977 SXG_BUFFER_TYPE_RCV);
1978 if (status != STATUS_SUCCESS)
1979 return status;
1980 }
1981 /*
1982 * NBL resource allocation can fail in the 'AllocateComplete' routine,
1983 * which doesn't return status. Make sure we got the number of buffers
1984 * we requested
1985 */
1986
1987 if (adapter->FreeRcvBufferCount < sxg_initial_rcv_data_buffers) {
1988 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
1989 adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES,
1990 0);
1991 return (STATUS_RESOURCES);
1992 }
1993 /*
1994	 * The microcode needs to be downloaded on every open.
1995 */
1996	DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __func__);
1997 if (sxg_download_microcode(adapter, SXG_UCODE_SAHARA)) {
1998 DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
1999				__func__);
2000 sxg_read_config(adapter);
2001 } else {
2002 adapter->state = ADAPT_FAIL;
2003 adapter->linkstate = LINK_DOWN;
2004 DBG_ERROR("sxg_download_microcode FAILED status[%x]\n",
2005 status);
2006 }
2007 msleep(5);
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302008
2009 if (turn) {
2010 sxg_second_open(adapter->netdev);
2011
2012 return STATUS_SUCCESS;
2013 }
2014
2015 turn++;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002016
2017 ASSERT(adapter);
Harvey Harrisone88bd232008-10-17 14:46:10 -07002018 DBG_ERROR("sxg: %s adapter->activated[%d]\n", __func__,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002019 adapter->activated);
2020 DBG_ERROR
2021 ("sxg: %s (%s): [jiffies[%lx] cpu %d] dev[%p] adapt[%p] port[%d]\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07002022 __func__, adapter->netdev->name, jiffies, smp_processor_id(),
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002023 adapter->netdev, adapter, adapter->port);
2024
2025 netif_stop_queue(adapter->netdev);
2026
2027 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
2028 if (!adapter->activated) {
2029 sxg_global.num_sxg_ports_active++;
2030 adapter->activated = 1;
2031 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04002032 /* Initialize the adapter */
Harvey Harrisone88bd232008-10-17 14:46:10 -07002033 DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002034 status = sxg_initialize_adapter(adapter);
2035 DBG_ERROR("sxg: %s EXIT sxg_initialize_adapter status[%x]\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07002036 __func__, status);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002037
2038 if (status == STATUS_SUCCESS) {
Harvey Harrisone88bd232008-10-17 14:46:10 -07002039 DBG_ERROR("sxg: %s ENTER sxg_if_init\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002040 status = sxg_if_init(adapter);
Harvey Harrisone88bd232008-10-17 14:46:10 -07002041 DBG_ERROR("sxg: %s EXIT sxg_if_init status[%x]\n", __func__,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002042 status);
2043 }
2044
2045 if (status != STATUS_SUCCESS) {
2046 if (adapter->activated) {
2047 sxg_global.num_sxg_ports_active--;
2048 adapter->activated = 0;
2049 }
2050 spin_unlock_irqrestore(&sxg_global.driver_lock,
2051 sxg_global.flags);
2052 return (status);
2053 }
Harvey Harrisone88bd232008-10-17 14:46:10 -07002054 DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __func__);
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05302055 sxg_set_interrupt_aggregation(adapter);
2056 napi_enable(&adapter->napi);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002057
J.R. Maurob243c4a2008-10-20 19:28:58 -04002058 /* Enable interrupts */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002059 SXG_ENABLE_ALL_INTERRUPTS(adapter);
2060
Harvey Harrisone88bd232008-10-17 14:46:10 -07002061 DBG_ERROR("sxg: %s EXIT\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002062
2063 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
2064 return STATUS_SUCCESS;
2065}
2066
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302067int sxg_second_open(struct net_device *dev)
2068{
2069 struct adapter_t *adapter = (struct adapter_t*) netdev_priv(dev);
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05302070 int status = 0;
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302071
2072 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
2073 netif_start_queue(adapter->netdev);
2074 adapter->state = ADAPT_UP;
2075 adapter->linkstate = LINK_UP;
2076
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05302077 status = sxg_initialize_adapter(adapter);
2078 sxg_set_interrupt_aggregation(adapter);
2079 napi_enable(&adapter->napi);
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302080 /* Re-enable interrupts */
2081 SXG_ENABLE_ALL_INTERRUPTS(adapter);
2082
2083 netif_carrier_on(dev);
2084 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
2085 sxg_register_interrupt(adapter);
2086 return (STATUS_SUCCESS);
2087
2088}
2089
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002090static void __devexit sxg_entry_remove(struct pci_dev *pcidev)
2091{
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302092 u32 mmio_start = 0;
2093 u32 mmio_len = 0;
2094
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302095 struct net_device *dev = pci_get_drvdata(pcidev);
J.R. Mauro73b07062008-10-28 18:42:02 -04002096 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05302097
2098 flush_scheduled_work();
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05302099
2100 /* Deallocate Resources */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302101 unregister_netdev(dev);
2102 sxg_free_resources(adapter);
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05302103
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002104 ASSERT(adapter);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002105
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302106 mmio_start = pci_resource_start(pcidev, 0);
2107 mmio_len = pci_resource_len(pcidev, 0);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002108
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302109	DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __func__,
2110 mmio_start, mmio_len);
2111 release_mem_region(mmio_start, mmio_len);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002112
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05302113 mmio_start = pci_resource_start(pcidev, 2);
2114 mmio_len = pci_resource_len(pcidev, 2);
2115
2116	DBG_ERROR("sxg: %s rel_region(2) start[%x] len[%x]\n", __func__,
2117 mmio_start, mmio_len);
2118 release_mem_region(mmio_start, mmio_len);
2119
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05302120 pci_disable_device(pcidev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002121
Harvey Harrisone88bd232008-10-17 14:46:10 -07002122 DBG_ERROR("sxg: %s deallocate device\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002123 kfree(dev);
Harvey Harrisone88bd232008-10-17 14:46:10 -07002124 DBG_ERROR("sxg: %s EXIT\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002125}
2126
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302127static int sxg_entry_halt(struct net_device *dev)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002128{
J.R. Mauro73b07062008-10-28 18:42:02 -04002129 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05302130 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
2131 int i;
2132 u32 RssIds, IsrCount;
2133 unsigned long flags;
2134
2135 RssIds = SXG_RSS_CPU_COUNT(adapter);
2136 IsrCount = adapter->MsiEnabled ? RssIds : 1;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002137
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05302138 napi_disable(&adapter->napi);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002139 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
Harvey Harrisone88bd232008-10-17 14:46:10 -07002140 DBG_ERROR("sxg: %s (%s) ENTER\n", __func__, dev->name);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002141
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05302142 WRITE_REG(adapter->UcodeRegs[0].RcvCmd, 0, true);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002143 netif_stop_queue(adapter->netdev);
2144 adapter->state = ADAPT_DOWN;
2145 adapter->linkstate = LINK_DOWN;
2146 adapter->devflags_prev = 0;
2147 DBG_ERROR("sxg: %s (%s) set adapter[%p] state to ADAPT_DOWN(%d)\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07002148 __func__, dev->name, adapter, adapter->state);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002149
Harvey Harrisone88bd232008-10-17 14:46:10 -07002150 DBG_ERROR("sxg: %s (%s) EXIT\n", __func__, dev->name);
2151 DBG_ERROR("sxg: %s EXIT\n", __func__);
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05302152
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05302153 /* Disable interrupts */
2154 SXG_DISABLE_ALL_INTERRUPTS(adapter);
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05302155
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302156 netif_carrier_off(dev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002157 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05302158
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302159 sxg_deregister_interrupt(adapter);
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05302160 WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);
2161 mdelay(5000);
2162 spin_lock(&adapter->RcvQLock);
2163 /* Free all the blocks and the buffers, moved from remove() routine */
2164 if (!(IsListEmpty(&adapter->AllRcvBlocks))) {
2165 sxg_free_rcvblocks(adapter);
2166 }
2167
2168
2169 InitializeListHead(&adapter->FreeRcvBuffers);
2170 InitializeListHead(&adapter->FreeRcvBlocks);
2171 InitializeListHead(&adapter->AllRcvBlocks);
2172 InitializeListHead(&adapter->FreeSglBuffers);
2173 InitializeListHead(&adapter->AllSglBuffers);
2174
2175 adapter->FreeRcvBufferCount = 0;
2176 adapter->FreeRcvBlockCount = 0;
2177 adapter->AllRcvBlockCount = 0;
2178 adapter->RcvBuffersOnCard = 0;
2179 adapter->PendingRcvCount = 0;
2180
2181 memset(adapter->RcvRings, 0, sizeof(struct sxg_rcv_ring) * 1);
2182 memset(adapter->EventRings, 0, sizeof(struct sxg_event_ring) * RssIds);
2183 memset(adapter->Isr, 0, sizeof(u32) * IsrCount);
2184 for (i = 0; i < SXG_MAX_RING_SIZE; i++)
2185 adapter->RcvRingZeroInfo.Context[i] = NULL;
2186 SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
2187 SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);
2188
2189 spin_unlock(&adapter->RcvQLock);
2190
2191 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
2192 adapter->AllSglBufferCount = 0;
2193 adapter->FreeSglBufferCount = 0;
2194 adapter->PendingXmtCount = 0;
2195 memset(adapter->XmtRings, 0, sizeof(struct sxg_xmt_ring) * 1);
2196 memset(adapter->XmtRingZeroIndex, 0, sizeof(u32));
2197 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2198
2199
2200 for (i = 0; i < SXG_MAX_RSS; i++) {
2201 adapter->NextEvent[i] = 0;
2202 }
2203 atomic_set(&adapter->pending_allocations, 0);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002204 return (STATUS_SUCCESS);
2205}
2206
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302207static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002208{
2209 ASSERT(rq);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302210/* DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __func__, cmd, rq, dev);*/
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002211 switch (cmd) {
2212 case SIOCSLICSETINTAGG:
2213 {
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302214 /* struct adapter_t *adapter = (struct adapter_t *)
2215 * netdev_priv(dev);
2216 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002217 u32 data[7];
2218 u32 intagg;
2219
2220 if (copy_from_user(data, rq->ifr_data, 28)) {
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302221				DBG_ERROR("copy_from_user FAILED getting "
2222					  "initial params\n");
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002223 return -EFAULT;
2224 }
2225 intagg = data[0];
2226 printk(KERN_EMERG
2227 "%s: set interrupt aggregation to %d\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07002228 __func__, intagg);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002229 return 0;
2230 }
2231
2232 default:
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302233 /* DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __func__, cmd); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002234 return -EOPNOTSUPP;
2235 }
2236 return 0;
2237}
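/*
 * A hedged userspace sketch of exercising the SIOCSLICSETINTAGG path
 * above (the interface name and aggregation value are placeholders; the
 * ioctl number itself comes from the driver headers). The handler copies
 * in 28 bytes -- seven 32-bit words -- and only uses data[0] as the
 * aggregation value:
 *
 *	unsigned int data[7] = { 250 };
 *	struct ifreq ifr = { 0 };
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (char *)data;
 *	ioctl(fd, SIOCSLICSETINTAGG, &ifr);
 */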
2238
2239#define NORMAL_ETHFRAME 0
2240
2241/*
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002242 * sxg_send_packets - Send a skb packet
2243 *
2244 * Arguments:
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302245 * skb - The packet to send
2246 * dev - Our linux net device that refs our adapter
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002247 *
2248 * Return:
2249 * 0 regardless of outcome XXXTODO refer to e1000 driver
2250 */
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302251static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002252{
J.R. Mauro73b07062008-10-28 18:42:02 -04002253 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002254 u32 status = STATUS_SUCCESS;
2255
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302256 /*
2257 * DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __FUNCTION__,
2258 * skb);
2259 */
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05302260
J.R. Maurob243c4a2008-10-20 19:28:58 -04002261 /* Check the adapter state */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002262 switch (adapter->State) {
2263 case SXG_STATE_INITIALIZING:
2264 case SXG_STATE_HALTED:
2265 case SXG_STATE_SHUTDOWN:
J.R. Maurob243c4a2008-10-20 19:28:58 -04002266 ASSERT(0); /* unexpected */
2267 /* fall through */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002268 case SXG_STATE_RESETTING:
2269 case SXG_STATE_SLEEP:
2270 case SXG_STATE_BOOTDIAG:
2271 case SXG_STATE_DIAG:
2272 case SXG_STATE_HALTING:
2273 status = STATUS_FAILURE;
2274 break;
2275 case SXG_STATE_RUNNING:
2276 if (adapter->LinkState != SXG_LINK_UP) {
2277 status = STATUS_FAILURE;
2278 }
2279 break;
2280 default:
2281 ASSERT(0);
2282 status = STATUS_FAILURE;
2283 }
2284 if (status != STATUS_SUCCESS) {
2285 goto xmit_fail;
2286 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04002287 /* send a packet */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002288 status = sxg_transmit_packet(adapter, skb);
2289 if (status == STATUS_SUCCESS) {
2290 goto xmit_done;
2291 }
2292
2293 xmit_fail:
J.R. Maurob243c4a2008-10-20 19:28:58 -04002294	/* reject & complete all the packets if they can't be sent */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002295 if (status != STATUS_SUCCESS) {
2296#if XXXTODO
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302297 /* sxg_send_packets_fail(adapter, skb, status); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002298#else
2299 SXG_DROP_DUMB_SEND(adapter, skb);
2300 adapter->stats.tx_dropped++;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302301 return NETDEV_TX_BUSY;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002302#endif
2303 }
Harvey Harrisone88bd232008-10-17 14:46:10 -07002304 DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __func__,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002305 status);
2306
2307 xmit_done:
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302308 return NETDEV_TX_OK;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002309}
2310
2311/*
2312 * sxg_transmit_packet
2313 *
2314 * This function transmits a single packet.
2315 *
2316 * Arguments -
2317 * adapter - Pointer to our adapter structure
2318 * skb - The packet to be sent
2319 *
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302320 * Return - STATUS of send
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002321 */
J.R. Mauro73b07062008-10-28 18:42:02 -04002322static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002323{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302324 struct sxg_x64_sgl *pSgl;
2325 struct sxg_scatter_gather *SxgSgl;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302326 unsigned long sgl_flags;
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05302327 /* void *SglBuffer; */
2328 /* u32 SglBufferLength; */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002329
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302330 /*
2331 * The vast majority of work is done in the shared
2332 * sxg_dumb_sgl routine.
2333 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002334 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSend",
2335 adapter, skb, 0, 0);
2336
J.R. Maurob243c4a2008-10-20 19:28:58 -04002337 /* Allocate a SGL buffer */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302338 SXG_GET_SGL_BUFFER(adapter, SxgSgl, 0);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002339 if (!SxgSgl) {
2340 adapter->Stats.NoSglBuf++;
Mithlesh Thukral54aed112009-01-19 20:27:17 +05302341 adapter->stats.tx_errors++;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002342 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "SndPktF1",
2343 adapter, skb, 0, 0);
2344 return (STATUS_RESOURCES);
2345 }
2346 ASSERT(SxgSgl->adapter == adapter);
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05302347 /*SglBuffer = SXG_SGL_BUFFER(SxgSgl);
2348 SglBufferLength = SXG_SGL_BUF_SIZE; */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002349 SxgSgl->VlanTag.VlanTci = 0;
2350 SxgSgl->VlanTag.VlanTpid = 0;
2351 SxgSgl->Type = SXG_SGL_DUMB;
2352 SxgSgl->DumbPacket = skb;
2353 pSgl = NULL;
2354
J.R. Maurob243c4a2008-10-20 19:28:58 -04002355 /* Call the common sxg_dumb_sgl routine to complete the send. */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302356 return (sxg_dumb_sgl(pSgl, SxgSgl));
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002357}
2358
2359/*
2360 * sxg_dumb_sgl
2361 *
2362 * Arguments:
2363 * pSgl -
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302364 * SxgSgl - struct sxg_scatter_gather
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002365 *
2366 * Return Value:
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302367 * Status of send operation.
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002368 */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302369static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302370 struct sxg_scatter_gather *SxgSgl)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002371{
J.R. Mauro73b07062008-10-28 18:42:02 -04002372 struct adapter_t *adapter = SxgSgl->adapter;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002373 struct sk_buff *skb = SxgSgl->DumbPacket;
J.R. Maurob243c4a2008-10-20 19:28:58 -04002374 /* For now, all dumb-nic sends go on RSS queue zero */
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302375 struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
2376 struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
2377 struct sxg_cmd *XmtCmd = NULL;
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302378 /* u32 Index = 0; */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002379 u32 DataLength = skb->len;
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302380 /* unsigned int BufLen; */
2381 /* u32 SglOffset; */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002382 u64 phys_addr;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302383 unsigned long flags;
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302384 unsigned long queue_id=0;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002385
2386 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl",
2387 pSgl, SxgSgl, 0, 0);
2388
J.R. Maurob243c4a2008-10-20 19:28:58 -04002389 /* Set aside a pointer to the sgl */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002390 SxgSgl->pSgl = pSgl;
2391
J.R. Maurob243c4a2008-10-20 19:28:58 -04002392 /* Sanity check that our SGL format is as we expect. */
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302393 ASSERT(sizeof(struct sxg_x64_sge) == sizeof(struct sxg_x64_sge));
J.R. Maurob243c4a2008-10-20 19:28:58 -04002394 /* Shouldn't be a vlan tag on this frame */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002395 ASSERT(SxgSgl->VlanTag.VlanTci == 0);
2396 ASSERT(SxgSgl->VlanTag.VlanTpid == 0);
2397
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302398 /*
2399 * From here below we work with the SGL placed in our
2400 * buffer.
2401 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002402
2403 SxgSgl->Sgl.NumberOfElements = 1;
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302404 /*
2405 * Set ucode Queue ID based on bottom bits of destination TCP port.
2406 * This Queue ID splits slowpath/dumb-nic packet processing across
2407 * multiple threads on the card to improve performance. It is split
2408 * using the TCP port to avoid out-of-order packets that can result
2409 * from multithreaded processing. We use the destination port because
2410 * we expect to be run on a server, so in nearly all cases the local
2411 * port is likely to be constant (well-known server port) and the
2412 * remote port is likely to be random. The exception to this is iSCSI,
2413 * in which case we use the sport instead. Note
2414 * that original attempt at XOR'ing source and dest port resulted in
2415	 * that an earlier attempt at XOR'ing the source and dest ports resulted in
2416 * line up (even-even, odd-odd..).
2417 */
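	/*
	 * Illustrative example (assuming the four-queue case noted later,
	 * i.e. SXG_LARGE_SEND_QUEUE_MASK == 0x3): an HTTP response sent to
	 * remote port 50000 hashes to queue 50000 & 0x3 = 0, while an iSCSI
	 * connection (dest port ISCSI_PORT) hashes on its source port
	 * instead.
	 */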
2418
2419 if (skb->protocol == htons(ETH_P_IP)) {
2420 struct iphdr *ip;
2421
2422 ip = ip_hdr(skb);
2423 if ((ip->protocol == IPPROTO_TCP)&&(DataLength >= sizeof(
2424 struct tcphdr))){
2425 queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ?
2426 (ntohs (tcp_hdr(skb)->source) &
2427 SXG_LARGE_SEND_QUEUE_MASK):
2428 (ntohs(tcp_hdr(skb)->dest) &
2429 SXG_LARGE_SEND_QUEUE_MASK));
2430 }
2431 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2432 if ( (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) && (DataLength >=
2433 sizeof(struct tcphdr)) ) {
2434 queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ?
2435 (ntohs (tcp_hdr(skb)->source) &
2436 SXG_LARGE_SEND_QUEUE_MASK):
2437 (ntohs(tcp_hdr(skb)->dest) &
2438 SXG_LARGE_SEND_QUEUE_MASK));
2439 }
2440 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002441
J.R. Maurob243c4a2008-10-20 19:28:58 -04002442 /* Grab the spinlock and acquire a command */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302443 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002444 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2445 if (XmtCmd == NULL) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302446 /*
2447 * Call sxg_complete_slow_send to see if we can
2448 * free up any XmtRingZero entries and then try again
2449 */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302450
2451 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
Mithlesh Thukralc5e5cf52009-02-06 19:31:40 +05302452 sxg_complete_slow_send(adapter);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302453 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002454 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2455 if (XmtCmd == NULL) {
2456 adapter->Stats.XmtZeroFull++;
2457 goto abortcmd;
2458 }
2459 }
2460 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd",
2461 XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
J.R. Maurob243c4a2008-10-20 19:28:58 -04002462 /* Update stats */
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05302463 adapter->stats.tx_packets++;
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05302464 adapter->stats.tx_bytes += DataLength;
J.R. Maurob243c4a2008-10-20 19:28:58 -04002465#if XXXTODO /* Stats stuff */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002466 if (SXG_MULTICAST_PACKET(EtherHdr)) {
2467 if (SXG_BROADCAST_PACKET(EtherHdr)) {
2468 adapter->Stats.DumbXmtBcastPkts++;
2469 adapter->Stats.DumbXmtBcastBytes += DataLength;
2470 } else {
2471 adapter->Stats.DumbXmtMcastPkts++;
2472 adapter->Stats.DumbXmtMcastBytes += DataLength;
2473 }
2474 } else {
2475 adapter->Stats.DumbXmtUcastPkts++;
2476 adapter->Stats.DumbXmtUcastBytes += DataLength;
2477 }
2478#endif
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302479 /*
2480 * Fill in the command
2481 * Copy out the first SGE to the command and adjust for offset
2482 */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302483 phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
J.R. Mauro5c7514e2008-10-05 20:38:52 -04002484 PCI_DMA_TODEVICE);
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05302485
2486 /*
2487 * SAHARA SGL WORKAROUND
2488	  * See if the SGL straddles a 64k boundary.  If it does, drop the
2489	  * packet rather than hand the hardware an SGL it cannot handle
2490 */
2491
2492 if (SXG_INVALID_SGL(phys_addr,skb->data_len))
2493 {
2494 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2495		/* Drop this packet (logged below) and return success so the stack does not retry it */
2496 printk(KERN_EMERG"Dropped a packet for 64k boundary problem\n");
2497 return STATUS_SUCCESS;
2498 }
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05302499 memset(XmtCmd, '\0', sizeof(*XmtCmd));
2500 XmtCmd->Buffer.FirstSgeAddress = phys_addr;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002501 XmtCmd->Buffer.FirstSgeLength = DataLength;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002502 XmtCmd->Buffer.SgeOffset = 0;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002503 XmtCmd->Buffer.TotalLength = DataLength;
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05302504 XmtCmd->SgEntries = 1;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002505 XmtCmd->Flags = 0;
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302506 /*
2507	 * Advance the transmit cmd descriptor by 1.
2508 * NOTE - See comments in SxgTcpOutput where we write
2509 * to the XmtCmd register regarding CPU ID values and/or
2510 * multiple commands.
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302511 * Top 16 bits specify queue_id. See comments about queue_id above
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302512 */
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302513 /* Four queues at the moment */
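	/*
	 * The value written below packs the ucode queue ID into the top 16
	 * bits and, presumably (per the note above about multiple commands),
	 * the number of commands being posted, one here, into the low 16 bits.
	 */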
2514 ASSERT((queue_id & ~SXG_LARGE_SEND_QUEUE_MASK) == 0);
2515 WRITE_REG(adapter->UcodeRegs[0].XmtCmd, ((queue_id << 16) | 1), TRUE);
J.R. Maurob243c4a2008-10-20 19:28:58 -04002516 adapter->Stats.XmtQLen++; /* Stats within lock */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302517 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002518 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2",
2519 XmtCmd, pSgl, SxgSgl, 0);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302520 return STATUS_SUCCESS;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002521
2522 abortcmd:
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302523 /*
2524 * NOTE - Only jump to this label AFTER grabbing the
2525 * XmtZeroLock, and DO NOT DROP IT between the
2526 * command allocation and the following abort.
2527 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002528 if (XmtCmd) {
2529 SXG_ABORT_CMD(XmtRingInfo);
2530 }
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302531 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002532
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302533/*
2534 * failsgl:
2535 * Jump to this label if failure occurs before the
2536 * XmtZeroLock is grabbed
2537 */
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05302538 adapter->stats.tx_errors++;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002539 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal",
2540 pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302541 /* SxgSgl->DumbPacket is the skb */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302542	/* SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket); */
Mithlesh Thukral54aed112009-01-19 20:27:17 +05302543
2544 return STATUS_FAILURE;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002545}
2546
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002547/*
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302548 * Link management functions
2549 *
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002550 * sxg_initialize_link - Initialize the link stuff
2551 *
2552 * Arguments -
2553 * adapter - A pointer to our adapter structure
2554 *
2555 * Return
2556 * status
2557 */
J.R. Mauro73b07062008-10-28 18:42:02 -04002558static int sxg_initialize_link(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002559{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302560 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002561 u32 Value;
2562 u32 ConfigData;
2563 u32 MaxFrame;
2564 int status;
2565
2566 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitLink",
2567 adapter, 0, 0, 0);
2568
J.R. Maurob243c4a2008-10-20 19:28:58 -04002569 /* Reset PHY and XGXS module */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002570 WRITE_REG(HwRegs->LinkStatus, LS_SERDES_POWER_DOWN, TRUE);
2571
J.R. Maurob243c4a2008-10-20 19:28:58 -04002572 /* Reset transmit configuration register */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002573 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_RESET, TRUE);
2574
J.R. Maurob243c4a2008-10-20 19:28:58 -04002575 /* Reset receive configuration register */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002576 WRITE_REG(HwRegs->RcvConfig, RCV_CONFIG_RESET, TRUE);
2577
J.R. Maurob243c4a2008-10-20 19:28:58 -04002578 /* Reset all MAC modules */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002579 WRITE_REG(HwRegs->MacConfig0, AXGMAC_CFG0_SUB_RESET, TRUE);
2580
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302581 /*
2582 * Link address 0
2583 * XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f)
2584	 * is stored with the first byte (0a) in byte 0
2585	 * of the MAC address.  Possibly reverse?
2586 */
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05302587 Value = *(u32 *) adapter->macaddr;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002588 WRITE_REG(HwRegs->LinkAddress0Low, Value, TRUE);
J.R. Maurob243c4a2008-10-20 19:28:58 -04002589 /* also write the MAC address to the MAC. Endian is reversed. */
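	/*
	 * Illustrative example, on a little-endian host with MAC
	 * 0a:0b:0c:0d:0e:0f: the u32 loaded above is 0x0d0c0b0a, so
	 * LinkAddress0Low gets that value while ntohl() hands the MAC
	 * block 0x0a0b0c0d.
	 */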
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002590 WRITE_REG(HwRegs->MacAddressLow, ntohl(Value), TRUE);
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05302591 Value = (*(u16 *) & adapter->macaddr[4] & 0x0000FFFF);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002592 WRITE_REG(HwRegs->LinkAddress0High, Value | LINK_ADDRESS_ENABLE, TRUE);
J.R. Maurob243c4a2008-10-20 19:28:58 -04002593 /* endian swap for the MAC (put high bytes in bits [31:16], swapped) */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002594 Value = ntohl(Value);
2595 WRITE_REG(HwRegs->MacAddressHigh, Value, TRUE);
J.R. Maurob243c4a2008-10-20 19:28:58 -04002596 /* Link address 1 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002597 WRITE_REG(HwRegs->LinkAddress1Low, 0, TRUE);
2598 WRITE_REG(HwRegs->LinkAddress1High, 0, TRUE);
J.R. Maurob243c4a2008-10-20 19:28:58 -04002599 /* Link address 2 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002600 WRITE_REG(HwRegs->LinkAddress2Low, 0, TRUE);
2601 WRITE_REG(HwRegs->LinkAddress2High, 0, TRUE);
J.R. Maurob243c4a2008-10-20 19:28:58 -04002602 /* Link address 3 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002603 WRITE_REG(HwRegs->LinkAddress3Low, 0, TRUE);
2604 WRITE_REG(HwRegs->LinkAddress3High, 0, TRUE);
2605
J.R. Maurob243c4a2008-10-20 19:28:58 -04002606 /* Enable MAC modules */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002607 WRITE_REG(HwRegs->MacConfig0, 0, TRUE);
2608
J.R. Maurob243c4a2008-10-20 19:28:58 -04002609 /* Configure MAC */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302610 WRITE_REG(HwRegs->MacConfig1, (
2611 /* Allow sending of pause */
2612 AXGMAC_CFG1_XMT_PAUSE |
2613 /* Enable XMT */
2614 AXGMAC_CFG1_XMT_EN |
2615 /* Enable detection of pause */
2616 AXGMAC_CFG1_RCV_PAUSE |
2617 /* Enable receive */
2618 AXGMAC_CFG1_RCV_EN |
2619 /* short frame detection */
2620 AXGMAC_CFG1_SHORT_ASSERT |
2621 /* Verify frame length */
2622 AXGMAC_CFG1_CHECK_LEN |
2623 /* Generate FCS */
2624 AXGMAC_CFG1_GEN_FCS |
2625 /* Pad frames to 64 bytes */
2626 AXGMAC_CFG1_PAD_64),
2627 TRUE);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002628
J.R. Maurob243c4a2008-10-20 19:28:58 -04002629 /* Set AXGMAC max frame length if jumbo. Not needed for standard MTU */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002630 if (adapter->JumboEnabled) {
2631 WRITE_REG(HwRegs->MacMaxFrameLen, AXGMAC_MAXFRAME_JUMBO, TRUE);
2632 }
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302633 /*
2634 * AMIIM Configuration Register -
2635 * The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion
2636 * (bottom bits) of this register is used to determine the MDC frequency
2637 * as specified in the A-XGMAC Design Document. This value must not be
2638 * zero. The following value (62 or 0x3E) is based on our MAC transmit
2639 * clock frequency (MTCLK) of 312.5 MHz. Given a maximum MDIO clock
2640 * frequency of 2.5 MHz (see the PHY spec), we get:
2641	 * 312.5/(2*(X+1)) < 2.5  ==>  X > 61.5, so X = 62 (MDC ~= 2.48 MHz).
2642 * This value happens to be the default value for this register, so we
2643 * really don't have to do this.
2644 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002645 WRITE_REG(HwRegs->MacAmiimConfig, 0x0000003E, TRUE);
2646
J.R. Maurob243c4a2008-10-20 19:28:58 -04002647 /* Power up and enable PHY and XAUI/XGXS/Serdes logic */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002648 WRITE_REG(HwRegs->LinkStatus,
2649 (LS_PHY_CLR_RESET |
2650 LS_XGXS_ENABLE |
2651 LS_XGXS_CTL | LS_PHY_CLK_EN | LS_ATTN_ALARM), TRUE);
2652 DBG_ERROR("After Power Up and enable PHY in sxg_initialize_link\n");
2653
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302654 /*
2655 * Per information given by Aeluros, wait 100 ms after removing reset.
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302656 * It's not enough to wait for the self-clearing reset bit in reg 0 to
2657 * clear.
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302658 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002659 mdelay(100);
2660
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302661 /* Verify the PHY has come up by checking that the Reset bit has
2662 * cleared.
2663 */
2664 status = sxg_read_mdio_reg(adapter,
2665 MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2666 PHY_PMA_CONTROL1, /* PMA/PMD control register */
2667 &Value);
2668 DBG_ERROR("After sxg_read_mdio_reg Value[%x] fail=%x\n", Value,
2669 (Value & PMA_CONTROL1_RESET));
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002670 if (status != STATUS_SUCCESS)
2671 return (STATUS_FAILURE);
J.R. Maurob243c4a2008-10-20 19:28:58 -04002672 if (Value & PMA_CONTROL1_RESET) /* reset complete if bit is 0 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002673 return (STATUS_FAILURE);
2674
J.R. Maurob243c4a2008-10-20 19:28:58 -04002675 /* The SERDES should be initialized by now - confirm */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002676 READ_REG(HwRegs->LinkStatus, Value);
J.R. Maurob243c4a2008-10-20 19:28:58 -04002677 if (Value & LS_SERDES_DOWN) /* verify SERDES is initialized */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002678 return (STATUS_FAILURE);
2679
J.R. Maurob243c4a2008-10-20 19:28:58 -04002680 /* The XAUI link should also be up - confirm */
2681 if (!(Value & LS_XAUI_LINK_UP)) /* verify XAUI link is up */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002682 return (STATUS_FAILURE);
2683
J.R. Maurob243c4a2008-10-20 19:28:58 -04002684 /* Initialize the PHY */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002685 status = sxg_phy_init(adapter);
2686 if (status != STATUS_SUCCESS)
2687 return (STATUS_FAILURE);
2688
J.R. Maurob243c4a2008-10-20 19:28:58 -04002689 /* Enable the Link Alarm */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302690
2691 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2692 * LASI_CONTROL - LASI control register
2693 * LASI_CTL_LS_ALARM_ENABLE - enable link alarm bit
2694 */
2695 status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2696 LASI_CONTROL,
2697 LASI_CTL_LS_ALARM_ENABLE);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002698 if (status != STATUS_SUCCESS)
2699 return (STATUS_FAILURE);
2700
J.R. Maurob243c4a2008-10-20 19:28:58 -04002701 /* XXXTODO - temporary - verify bit is set */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302702
2703 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2704 * LASI_CONTROL - LASI control register
2705 */
2706 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2707 LASI_CONTROL,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002708 &Value);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302709
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002710 if (status != STATUS_SUCCESS)
2711 return (STATUS_FAILURE);
2712 if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) {
2713 DBG_ERROR("Error! LASI Control Alarm Enable bit not set!\n");
2714 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04002715 /* Enable receive */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002716 MaxFrame = adapter->JumboEnabled ? JUMBOMAXFRAME : ETHERMAXFRAME;
2717 ConfigData = (RCV_CONFIG_ENABLE |
2718 RCV_CONFIG_ENPARSE |
2719 RCV_CONFIG_RCVBAD |
2720 RCV_CONFIG_RCVPAUSE |
2721 RCV_CONFIG_TZIPV6 |
2722 RCV_CONFIG_TZIPV4 |
2723 RCV_CONFIG_HASH_16 |
2724 RCV_CONFIG_SOCKET | RCV_CONFIG_BUFSIZE(MaxFrame));
2725 WRITE_REG(HwRegs->RcvConfig, ConfigData, TRUE);
2726
2727 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_ENABLE, TRUE);
2728
J.R. Maurob243c4a2008-10-20 19:28:58 -04002729 /* Mark the link as down. We'll get a link event when it comes up. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002730 sxg_link_state(adapter, SXG_LINK_DOWN);
2731
2732 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInitLnk",
2733 adapter, 0, 0, 0);
2734 return (STATUS_SUCCESS);
2735}
2736
2737/*
2738 * sxg_phy_init - Initialize the PHY
2739 *
2740 * Arguments -
2741 * adapter - A pointer to our adapter structure
2742 *
2743 * Return
2744 * status
2745 */
J.R. Mauro73b07062008-10-28 18:42:02 -04002746static int sxg_phy_init(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002747{
2748 u32 Value;
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302749 struct phy_ucode *p;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002750 int status;
2751
Harvey Harrisone88bd232008-10-17 14:46:10 -07002752 DBG_ERROR("ENTER %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002753
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302754 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2755 * 0xC205 - PHY ID register (?)
2756 * &Value - XXXTODO - add def
2757 */
2758 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2759 0xC205,
2760 &Value);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002761 if (status != STATUS_SUCCESS)
2762 return (STATUS_FAILURE);
2763
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302764 if (Value == 0x0012) {
2765 /* 0x0012 == AEL2005C PHY(?) - XXXTODO - add def */
2766		DBG_ERROR("AEL2005C PHY detected.  Downloading PHY "
2767			  "microcode.\n");
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002768
J.R. Maurob243c4a2008-10-20 19:28:58 -04002769 /* Initialize AEL2005C PHY and download PHY microcode */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002770 for (p = PhyUcode; p->Addr != 0xFFFF; p++) {
2771 if (p->Addr == 0) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04002772 /* if address == 0, data == sleep time in ms */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002773 mdelay(p->Data);
2774 } else {
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302775 /* write the given data to the specified address */
2776 status = sxg_write_mdio_reg(adapter,
2777 MIIM_DEV_PHY_PMA,
2778 /* PHY address */
2779 p->Addr,
2780 /* PHY data */
2781 p->Data);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002782 if (status != STATUS_SUCCESS)
2783 return (STATUS_FAILURE);
2784 }
2785 }
2786 }
Harvey Harrisone88bd232008-10-17 14:46:10 -07002787 DBG_ERROR("EXIT %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002788
2789 return (STATUS_SUCCESS);
2790}
2791
2792/*
2793 * sxg_link_event - Process a link event notification from the card
2794 *
2795 * Arguments -
2796 * adapter - A pointer to our adapter structure
2797 *
2798 * Return
2799 * None
2800 */
J.R. Mauro73b07062008-10-28 18:42:02 -04002801static void sxg_link_event(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002802{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302803 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302804 struct net_device *netdev = adapter->netdev;
J.R. Mauro73b07062008-10-28 18:42:02 -04002805 enum SXG_LINK_STATE LinkState;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002806 int status;
2807 u32 Value;
2808
2809 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "LinkEvnt",
2810 adapter, 0, 0, 0);
Harvey Harrisone88bd232008-10-17 14:46:10 -07002811 DBG_ERROR("ENTER %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002812
J.R. Maurob243c4a2008-10-20 19:28:58 -04002813 /* Check the Link Status register. We should have a Link Alarm. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002814 READ_REG(HwRegs->LinkStatus, Value);
2815 if (Value & LS_LINK_ALARM) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302816 /*
2817 * We got a Link Status alarm. First, pause to let the
2818 * link state settle (it can bounce a number of times)
2819 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002820 mdelay(10);
2821
J.R. Maurob243c4a2008-10-20 19:28:58 -04002822 /* Now clear the alarm by reading the LASI status register. */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302823 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */
2824 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2825 /* LASI status register */
2826 LASI_STATUS,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002827 &Value);
2828 if (status != STATUS_SUCCESS) {
2829 DBG_ERROR("Error reading LASI Status MDIO register!\n");
2830 sxg_link_state(adapter, SXG_LINK_DOWN);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302831 /* ASSERT(0); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002832 }
2833 ASSERT(Value & LASI_STATUS_LS_ALARM);
2834
J.R. Maurob243c4a2008-10-20 19:28:58 -04002835 /* Now get and set the link state */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002836 LinkState = sxg_get_link_state(adapter);
2837 sxg_link_state(adapter, LinkState);
2838 DBG_ERROR("SXG: Link Alarm occurred. Link is %s\n",
2839 ((LinkState == SXG_LINK_UP) ? "UP" : "DOWN"));
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302840 if (LinkState == SXG_LINK_UP)
2841 netif_carrier_on(netdev);
2842 else
2843 netif_carrier_off(netdev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002844 } else {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302845 /*
2846		 * XXXTODO - If Link Attention is only generated for the
2847		 * Link Alarm pin (and not for a XAUI Link Status change),
2848		 * then it should be impossible to get here.  Yet we've gotten here
2849 * twice (under extreme conditions - bouncing the link up and
2850 * down many times a second). Needs further investigation.
2851 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002852 DBG_ERROR("SXG: sxg_link_event: Can't get here!\n");
2853 DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302854 /* ASSERT(0); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002855 }
Harvey Harrisone88bd232008-10-17 14:46:10 -07002856 DBG_ERROR("EXIT %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002857
2858}
2859
2860/*
2861 * sxg_get_link_state - Determine if the link is up or down
2862 *
2863 * Arguments -
2864 * adapter - A pointer to our adapter structure
2865 *
2866 * Return
2867 * Link State
2868 */
J.R. Mauro73b07062008-10-28 18:42:02 -04002869static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002870{
2871 int status;
2872 u32 Value;
2873
Harvey Harrisone88bd232008-10-17 14:46:10 -07002874 DBG_ERROR("ENTER %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002875
2876 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "GetLink",
2877 adapter, 0, 0, 0);
2878
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302879 /*
2880 * Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if
2881 * the following 3 bits (from 3 different MDIO registers) are all true.
2882 */
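	/*
	 * The three checks below, in order: PMA/PMD receive signal detect,
	 * PCS 10GBASE-R block lock, and XS lane alignment.
	 */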
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302883
2884 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */
2885 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2886 /* PMA/PMD Receive Signal Detect register */
2887 PHY_PMA_RCV_DET,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002888 &Value);
2889 if (status != STATUS_SUCCESS)
2890 goto bad;
2891
J.R. Maurob243c4a2008-10-20 19:28:58 -04002892 /* If PMA/PMD receive signal detect is 0, then the link is down */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002893 if (!(Value & PMA_RCV_DETECT))
2894 return (SXG_LINK_DOWN);
2895
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302896 /* MIIM_DEV_PHY_PCS - PHY PCS module */
2897 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS,
2898 /* PCS 10GBASE-R Status 1 register */
2899 PHY_PCS_10G_STATUS1,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002900 &Value);
2901 if (status != STATUS_SUCCESS)
2902 goto bad;
2903
J.R. Maurob243c4a2008-10-20 19:28:58 -04002904 /* If PCS is not locked to receive blocks, then the link is down */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002905 if (!(Value & PCS_10B_BLOCK_LOCK))
2906 return (SXG_LINK_DOWN);
2907
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302908 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS,/* PHY XS module */
2909 /* XS Lane Status register */
2910 PHY_XS_LANE_STATUS,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002911 &Value);
2912 if (status != STATUS_SUCCESS)
2913 goto bad;
2914
J.R. Maurob243c4a2008-10-20 19:28:58 -04002915 /* If XS transmit lanes are not aligned, then the link is down */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002916 if (!(Value & XS_LANE_ALIGN))
2917 return (SXG_LINK_DOWN);
2918
J.R. Maurob243c4a2008-10-20 19:28:58 -04002919 /* All 3 bits are true, so the link is up */
Harvey Harrisone88bd232008-10-17 14:46:10 -07002920 DBG_ERROR("EXIT %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002921
2922 return (SXG_LINK_UP);
2923
2924 bad:
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302925 /* An error occurred reading an MDIO register. This shouldn't happen. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002926 DBG_ERROR("Error reading an MDIO register!\n");
2927 ASSERT(0);
2928 return (SXG_LINK_DOWN);
2929}
2930
J.R. Mauro73b07062008-10-28 18:42:02 -04002931static void sxg_indicate_link_state(struct adapter_t *adapter,
2932 enum SXG_LINK_STATE LinkState)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002933{
2934 if (adapter->LinkState == SXG_LINK_UP) {
2935 DBG_ERROR("%s: LINK now UP, call netif_start_queue\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07002936 __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002937 netif_start_queue(adapter->netdev);
2938 } else {
2939 DBG_ERROR("%s: LINK now DOWN, call netif_stop_queue\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07002940 __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002941 netif_stop_queue(adapter->netdev);
2942 }
2943}
2944
2945/*
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05302946 * sxg_change_mtu - Change the Maximum Transfer Unit
2947 * * @returns 0 on success, negative on failure
2948 */
2949int sxg_change_mtu (struct net_device *netdev, int new_mtu)
2950{
2951 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(netdev);
2952
2953 if (!((new_mtu == SXG_DEFAULT_MTU) || (new_mtu == SXG_JUMBO_MTU)))
2954 return -EINVAL;
2955
2956 if(new_mtu == netdev->mtu)
2957 return 0;
2958
2959 netdev->mtu = new_mtu;
2960
2961 if (new_mtu == SXG_JUMBO_MTU) {
2962 adapter->JumboEnabled = TRUE;
2963 adapter->FrameSize = JUMBOMAXFRAME;
2964 adapter->ReceiveBufferSize = SXG_RCV_JUMBO_BUFFER_SIZE;
2965 } else {
2966 adapter->JumboEnabled = FALSE;
2967 adapter->FrameSize = ETHERMAXFRAME;
2968 adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
2969 }
2970
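	/*
	 * Bounce the interface so the receive path is rebuilt for the new
	 * FrameSize/ReceiveBufferSize set above (this assumes sxg_entry_open
	 * reposts receive buffers of the configured size).
	 */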
2971 sxg_entry_halt(netdev);
2972 sxg_entry_open(netdev);
2973 return 0;
2974}
2975
2976/*
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002977 * sxg_link_state - Set the link state and if necessary, indicate.
2978 * This routine the central point of processing for all link state changes.
2979 * Nothing else in the driver should alter the link state or perform
2980 * link state indications
2981 *
2982 * Arguments -
2983 * adapter - A pointer to our adapter structure
2984 * LinkState - The link state
2985 *
2986 * Return
2987 * None
2988 */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302989static void sxg_link_state(struct adapter_t *adapter,
2990 enum SXG_LINK_STATE LinkState)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002991{
2992 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "LnkINDCT",
2993 adapter, LinkState, adapter->LinkState, adapter->State);
2994
Harvey Harrisone88bd232008-10-17 14:46:10 -07002995 DBG_ERROR("ENTER %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002996
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302997 /*
2998 * Hold the adapter lock during this routine. Maybe move
2999 * the lock to the caller.
3000 */
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303001 /* IMP TODO : Check if we can survive without taking this lock */
3002// spin_lock(&adapter->AdapterLock);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003003 if (LinkState == adapter->LinkState) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04003004 /* Nothing changed.. */
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303005// spin_unlock(&adapter->AdapterLock);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303006 DBG_ERROR("EXIT #0 %s. Link status = %d\n",
3007 __func__, LinkState);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003008 return;
3009 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04003010 /* Save the adapter state */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003011 adapter->LinkState = LinkState;
3012
J.R. Maurob243c4a2008-10-20 19:28:58 -04003013 /* Drop the lock and indicate link state */
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303014// spin_unlock(&adapter->AdapterLock);
Harvey Harrisone88bd232008-10-17 14:46:10 -07003015 DBG_ERROR("EXIT #1 %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003016
3017 sxg_indicate_link_state(adapter, LinkState);
3018}
3019
3020/*
3021 * sxg_write_mdio_reg - Write to a register on the MDIO bus
3022 *
3023 * Arguments -
3024 * adapter - A pointer to our adapter structure
3025 * DevAddr - MDIO device number being addressed
3026 * RegAddr - register address for the specified MDIO device
3027 * Value - value to write to the MDIO register
3028 *
3029 * Return
3030 * status
3031 */
J.R. Mauro73b07062008-10-28 18:42:02 -04003032static int sxg_write_mdio_reg(struct adapter_t *adapter,
J.R. Mauro5c7514e2008-10-05 20:38:52 -04003033 u32 DevAddr, u32 RegAddr, u32 Value)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003034{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303035 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303036 /* Address operation (written to MIIM field reg) */
3037 u32 AddrOp;
3038 /* Write operation (written to MIIM field reg) */
3039 u32 WriteOp;
3040 u32 Cmd;/* Command (written to MIIM command reg) */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003041 u32 ValueRead;
3042 u32 Timeout;
3043
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303044 /* DBG_ERROR("ENTER %s\n", __func__); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003045
3046 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
3047 adapter, 0, 0, 0);
3048
J.R. Maurob243c4a2008-10-20 19:28:58 -04003049 /* Ensure values don't exceed field width */
3050 DevAddr &= 0x001F; /* 5-bit field */
3051 RegAddr &= 0xFFFF; /* 16-bit field */
3052 Value &= 0xFFFF; /* 16-bit field */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003053
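	/*
	 * The AMIIM access below is a two-cycle (clause-45 style) MDIO
	 * sequence: an address cycle latches RegAddr in the PHY, then a
	 * separate write cycle transfers Value; each cycle is polled for
	 * completion via the AMIIM indicator register.
	 */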
J.R. Maurob243c4a2008-10-20 19:28:58 -04003054 /* Set MIIM field register bits for an MIIM address operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003055 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3056 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3057 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3058 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
3059
J.R. Maurob243c4a2008-10-20 19:28:58 -04003060 /* Set MIIM field register bits for an MIIM write operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003061 WriteOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3062 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3063 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3064 (MIIM_OP_WRITE << AXGMAC_AMIIM_FIELD_OP_SHIFT) | Value;
3065
J.R. Maurob243c4a2008-10-20 19:28:58 -04003066 /* Set MIIM command register bits to execute an MIIM command */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003067 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
3068
J.R. Maurob243c4a2008-10-20 19:28:58 -04003069 /* Reset the command register command bit (in case it's not 0) */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003070 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3071
J.R. Maurob243c4a2008-10-20 19:28:58 -04003072 /* MIIM write to set the address of the specified MDIO register */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003073 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
3074
J.R. Maurob243c4a2008-10-20 19:28:58 -04003075 /* Write to MIIM Command Register to execute to address operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003076 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3077
J.R. Maurob243c4a2008-10-20 19:28:58 -04003078 /* Poll AMIIM Indicator register to wait for completion */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003079 Timeout = SXG_LINK_TIMEOUT;
3080 do {
J.R. Maurob243c4a2008-10-20 19:28:58 -04003081 udelay(100); /* Timeout in 100us units */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003082 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3083 if (--Timeout == 0) {
3084 return (STATUS_FAILURE);
3085 }
3086 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3087
J.R. Maurob243c4a2008-10-20 19:28:58 -04003088 /* Reset the command register command bit */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003089 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3090
J.R. Maurob243c4a2008-10-20 19:28:58 -04003091 /* MIIM write to set up an MDIO write operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003092 WRITE_REG(HwRegs->MacAmiimField, WriteOp, TRUE);
3093
J.R. Maurob243c4a2008-10-20 19:28:58 -04003094 /* Write to MIIM Command Register to execute the write operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003095 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3096
J.R. Maurob243c4a2008-10-20 19:28:58 -04003097 /* Poll AMIIM Indicator register to wait for completion */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003098 Timeout = SXG_LINK_TIMEOUT;
3099 do {
J.R. Maurob243c4a2008-10-20 19:28:58 -04003100 udelay(100); /* Timeout in 100us units */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003101 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3102 if (--Timeout == 0) {
3103 return (STATUS_FAILURE);
3104 }
3105 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3106
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303107 /* DBG_ERROR("EXIT %s\n", __func__); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003108
3109 return (STATUS_SUCCESS);
3110}
3111
3112/*
3113 * sxg_read_mdio_reg - Read a register on the MDIO bus
3114 *
3115 * Arguments -
3116 * adapter - A pointer to our adapter structure
3117 * DevAddr - MDIO device number being addressed
3118 * RegAddr - register address for the specified MDIO device
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303119 * pValue - pointer to where to put data read from the MDIO register
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003120 *
3121 * Return
3122 * status
3123 */
J.R. Mauro73b07062008-10-28 18:42:02 -04003124static int sxg_read_mdio_reg(struct adapter_t *adapter,
J.R. Mauro5c7514e2008-10-05 20:38:52 -04003125 u32 DevAddr, u32 RegAddr, u32 *pValue)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003126{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303127 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303128 u32 AddrOp; /* Address operation (written to MIIM field reg) */
3129 u32 ReadOp; /* Read operation (written to MIIM field reg) */
3130 u32 Cmd; /* Command (written to MIIM command reg) */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003131 u32 ValueRead;
3132 u32 Timeout;
3133
3134	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RdMDIO",
3135 adapter, 0, 0, 0);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303136	DBG_ERROR("ENTER %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003137
J.R. Maurob243c4a2008-10-20 19:28:58 -04003138 /* Ensure values don't exceed field width */
3139 DevAddr &= 0x001F; /* 5-bit field */
3140 RegAddr &= 0xFFFF; /* 16-bit field */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003141
J.R. Maurob243c4a2008-10-20 19:28:58 -04003142 /* Set MIIM field register bits for an MIIM address operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003143 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3144 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3145 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3146 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
3147
J.R. Maurob243c4a2008-10-20 19:28:58 -04003148 /* Set MIIM field register bits for an MIIM read operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003149 ReadOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3150 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3151 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3152 (MIIM_OP_READ << AXGMAC_AMIIM_FIELD_OP_SHIFT);
3153
J.R. Maurob243c4a2008-10-20 19:28:58 -04003154 /* Set MIIM command register bits to execute an MIIM command */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003155 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
3156
J.R. Maurob243c4a2008-10-20 19:28:58 -04003157 /* Reset the command register command bit (in case it's not 0) */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003158 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3159
J.R. Maurob243c4a2008-10-20 19:28:58 -04003160 /* MIIM write to set the address of the specified MDIO register */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003161 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
3162
J.R. Maurob243c4a2008-10-20 19:28:58 -04003163 /* Write to MIIM Command Register to execute to address operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003164 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3165
J.R. Maurob243c4a2008-10-20 19:28:58 -04003166 /* Poll AMIIM Indicator register to wait for completion */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003167 Timeout = SXG_LINK_TIMEOUT;
3168 do {
J.R. Maurob243c4a2008-10-20 19:28:58 -04003169 udelay(100); /* Timeout in 100us units */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003170 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3171 if (--Timeout == 0) {
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05303172			DBG_ERROR("EXIT %s with STATUS_FAILURE 1\n", __func__);
3173
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003174 return (STATUS_FAILURE);
3175 }
3176 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3177
J.R. Maurob243c4a2008-10-20 19:28:58 -04003178 /* Reset the command register command bit */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003179 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3180
J.R. Maurob243c4a2008-10-20 19:28:58 -04003181 /* MIIM write to set up an MDIO register read operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003182 WRITE_REG(HwRegs->MacAmiimField, ReadOp, TRUE);
3183
J.R. Maurob243c4a2008-10-20 19:28:58 -04003184 /* Write to MIIM Command Register to execute the read operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003185 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3186
J.R. Maurob243c4a2008-10-20 19:28:58 -04003187 /* Poll AMIIM Indicator register to wait for completion */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003188 Timeout = SXG_LINK_TIMEOUT;
3189 do {
J.R. Maurob243c4a2008-10-20 19:28:58 -04003190 udelay(100); /* Timeout in 100us units */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003191 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3192 if (--Timeout == 0) {
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05303193			DBG_ERROR("EXIT %s with STATUS_FAILURE 2\n", __func__);
3194
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003195 return (STATUS_FAILURE);
3196 }
3197 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3198
J.R. Maurob243c4a2008-10-20 19:28:58 -04003199 /* Read the MDIO register data back from the field register */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003200 READ_REG(HwRegs->MacAmiimField, *pValue);
J.R. Maurob243c4a2008-10-20 19:28:58 -04003201 *pValue &= 0xFFFF; /* data is in the lower 16 bits */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003202
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303203	DBG_ERROR("EXIT %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003204
3205 return (STATUS_SUCCESS);
3206}
3207
3208/*
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003209 * Functions to obtain the CRC corresponding to the destination mac address.
3210 * This is a standard ethernet CRC in that it is a 32-bit, reflected CRC using
3211 * the polynomial:
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303212 * x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 + x^5
3213 * + x^4 + x^2 + x + 1.
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003214 *
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303215 * After the CRC for the 6 bytes is generated (but before the value is
3216 * complemented), we must then transpose the value and return bits 30-23.
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003217 */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303218static u32 sxg_crc_table[256];	/* Table of CRCs for all possible byte values */
3219static u32 sxg_crc_init; /* Is table initialized */
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003220
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303221/* Construct the CRC32 table */
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003222static void sxg_mcast_init_crc32(void)
3223{
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303224	u32 c;			/* CRC shift reg */
3225 u32 e = 0; /* Poly X-or pattern */
3226 int i; /* counter */
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003227 int k; /* byte being shifted into crc */
3228
3229 static int p[] = { 0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26 };
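	/*
	 * p[] lists the exponents of the CRC-32 generator polynomial
	 * (the implicit x^32 term aside); reflected into e below, this
	 * works out to the familiar 0xEDB88320 constant.
	 */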
3230
3231 for (i = 0; i < sizeof(p) / sizeof(int); i++) {
3232 e |= 1L << (31 - p[i]);
3233 }
3234
3235 for (i = 1; i < 256; i++) {
3236 c = i;
3237 for (k = 8; k; k--) {
3238 c = c & 1 ? (c >> 1) ^ e : c >> 1;
3239 }
3240 sxg_crc_table[i] = c;
3241 }
3242}
3243
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003244/*
3245 * Return the MAC hash as described above.
3246 */
3247static unsigned char sxg_mcast_get_mac_hash(char *macaddr)
3248{
3249 u32 crc;
3250 char *p;
3251 int i;
3252 unsigned char machash = 0;
3253
3254 if (!sxg_crc_init) {
3255 sxg_mcast_init_crc32();
3256 sxg_crc_init = 1;
3257 }
3258
3259 crc = 0xFFFFFFFF; /* Preload shift register, per crc-32 spec */
3260 for (i = 0, p = macaddr; i < 6; ++p, ++i) {
3261 crc = (crc >> 8) ^ sxg_crc_table[(crc ^ *p) & 0xFF];
3262 }
3263
3264 /* Return bits 1-8, transposed */
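	/*
	 * Reversing bits 1-8 of the reflected CRC here is equivalent to
	 * taking bits 30-23 of the bit-transposed value described in the
	 * header comment above.
	 */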
3265 for (i = 1; i < 9; i++) {
3266 machash |= (((crc >> i) & 1) << (8 - i));
3267 }
3268
3269 return (machash);
3270}
3271
J.R. Mauro73b07062008-10-28 18:42:02 -04003272static void sxg_mcast_set_mask(struct adapter_t *adapter)
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003273{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303274 struct sxg_ucode_regs *sxg_regs = adapter->UcodeRegs;
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003275
Mithlesh Thukralb040b072009-01-28 07:08:11 +05303276 DBG_ERROR("%s ENTER (%s) MacFilter[%x] mask[%llx]\n", __FUNCTION__,
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003277 adapter->netdev->name, (unsigned int)adapter->MacFilter,
3278 adapter->MulticastMask);
3279
3280 if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303281 /*
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303282 * Turn on all multicast addresses. We have to do this for
3283 * promiscuous mode as well as ALLMCAST mode. It saves the
3284		 * Microcode from having to keep state about the MAC configuration.
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003285 */
Mithlesh Thukralb040b072009-01-28 07:08:11 +05303286 /* DBG_ERROR("sxg: %s MacFilter = MAC_ALLMCAST | MAC_PROMISC\n \
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303287 * SLUT MODE!!!\n",__func__);
3288 */
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003289 WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
3290 WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303291 /* DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high \
3292 * 0xFFFFFFFF\n",__func__, adapter->netdev->name);
3293 */
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003294
3295 } else {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303296 /*
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303297		 * Commit our multicast mask to the SLIC by writing to the
3298 * multicast address mask registers
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003299 */
3300 DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
3301 __func__, adapter->netdev->name,
3302 ((ulong) (adapter->MulticastMask & 0xFFFFFFFF)),
3303 ((ulong)
3304 ((adapter->MulticastMask >> 32) & 0xFFFFFFFF)));
3305
3306 WRITE_REG(sxg_regs->McastLow,
3307 (u32) (adapter->MulticastMask & 0xFFFFFFFF), FLUSH);
3308 WRITE_REG(sxg_regs->McastHigh,
3309 (u32) ((adapter->
3310 MulticastMask >> 32) & 0xFFFFFFFF), FLUSH);
3311 }
3312}
3313
J.R. Mauro73b07062008-10-28 18:42:02 -04003314static void sxg_mcast_set_bit(struct adapter_t *adapter, char *address)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003315{
3316 unsigned char crcpoly;
3317
3318 /* Get the CRC polynomial for the mac address */
3319 crcpoly = sxg_mcast_get_mac_hash(address);
3320
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303321 /*
3322 * We only have space on the SLIC for 64 entries. Lop
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003323 * off the top two bits. (2^6 = 64)
3324 */
3325 crcpoly &= 0x3F;
3326
3327 /* OR in the new bit into our 64 bit mask. */
3328 adapter->MulticastMask |= (u64) 1 << crcpoly;
3329}
Mithlesh Thukralb040b072009-01-28 07:08:11 +05303330
3331/*
3332 * Function takes MAC addresses from dev_mc_list and generates the Mask
3333 */
3334
3335static void sxg_set_mcast_addr(struct adapter_t *adapter)
3336{
3337 struct dev_mc_list *mclist;
3338 struct net_device *dev = adapter->netdev;
3339 int i;
3340
3341 if (adapter->MacFilter & (MAC_ALLMCAST | MAC_MCAST)) {
3342 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3343 i++, mclist = mclist->next) {
3344 sxg_mcast_set_bit(adapter,mclist->da_addr);
3345 }
3346 }
3347 sxg_mcast_set_mask(adapter);
3348}
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003349
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303350static void sxg_mcast_set_list(struct net_device *dev)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003351{
J.R. Mauro73b07062008-10-28 18:42:02 -04003352 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003353
3354 ASSERT(adapter);
Mithlesh Thukral559990c2009-01-30 20:20:19 +05303355 if (dev->flags & IFF_PROMISC)
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05303356 adapter->MacFilter |= MAC_PROMISC;
Mithlesh Thukralb040b072009-01-28 07:08:11 +05303357 if (dev->flags & IFF_MULTICAST)
3358 adapter->MacFilter |= MAC_MCAST;
Mithlesh Thukral559990c2009-01-30 20:20:19 +05303359 if (dev->flags & IFF_ALLMULTI)
Mithlesh Thukralb040b072009-01-28 07:08:11 +05303360 adapter->MacFilter |= MAC_ALLMCAST;
Mithlesh Thukralb040b072009-01-28 07:08:11 +05303361
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05303362	/* XXX: handle other flags as well */
Mithlesh Thukralb040b072009-01-28 07:08:11 +05303363 sxg_set_mcast_addr(adapter);
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05303364}
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003365
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303366void sxg_free_sgl_buffers(struct adapter_t *adapter)
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303367{
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303368 struct list_entry *ple;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303369 struct sxg_scatter_gather *Sgl;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003370
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303371 while(!(IsListEmpty(&adapter->AllSglBuffers))) {
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303372 ple = RemoveHeadList(&adapter->AllSglBuffers);
3373 Sgl = container_of(ple, struct sxg_scatter_gather, AllList);
3374 kfree(Sgl);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303375 adapter->AllSglBufferCount--;
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303376 }
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303377}
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303378
3379void sxg_free_rcvblocks(struct adapter_t *adapter)
3380{
3381 u32 i;
3382 void *temp_RcvBlock;
3383 struct list_entry *ple;
3384 struct sxg_rcv_block_hdr *RcvBlockHdr;
3385 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
3386 ASSERT((adapter->state == SXG_STATE_INITIALIZING) ||
3387 (adapter->state == SXG_STATE_HALTING));
3388 while(!(IsListEmpty(&adapter->AllRcvBlocks))) {
3389
3390 ple = RemoveHeadList(&adapter->AllRcvBlocks);
3391 RcvBlockHdr = container_of(ple, struct sxg_rcv_block_hdr, AllList);
3392
3393 if(RcvBlockHdr->VirtualAddress) {
3394 temp_RcvBlock = RcvBlockHdr->VirtualAddress;
3395
3396 for(i=0; i< SXG_RCV_DESCRIPTORS_PER_BLOCK;
3397 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3398 RcvDataBufferHdr =
3399 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
3400 SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
3401 }
3402 }
3403
3404 pci_free_consistent(adapter->pcidev,
3405 SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
3406 RcvBlockHdr->VirtualAddress,
3407 RcvBlockHdr->PhysicalAddress);
3408 adapter->AllRcvBlockCount--;
3409 }
3410 ASSERT(adapter->AllRcvBlockCount == 0);
3411 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk",
3412 adapter, 0, 0, 0);
3413}
3414void sxg_free_mcast_addrs(struct adapter_t *adapter)
3415{
3416 struct sxg_multicast_address *address;
3417 while(adapter->MulticastAddrs) {
3418 address = adapter->MulticastAddrs;
3419 adapter->MulticastAddrs = address->Next;
3420 kfree(address);
3421 }
3422
3423 adapter->MulticastMask= 0;
3424}
3425
3426void sxg_unmap_resources(struct adapter_t *adapter)
3427{
3428 if(adapter->HwRegs) {
3429 iounmap((void *)adapter->HwRegs);
3430 }
3431 if(adapter->UcodeRegs) {
3432 iounmap((void *)adapter->UcodeRegs);
3433 }
3434
3435 ASSERT(adapter->AllRcvBlockCount == 0);
3436 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk",
3437 adapter, 0, 0, 0);
3438}
3439
3440
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303441
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003442/*
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303443 * sxg_free_resources - Free everything allocated in SxgAllocateResources
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003444 *
3445 * Arguments -
3446 * adapter - A pointer to our adapter structure
3447 *
3448 * Return
3449 * none
3450 */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303451void sxg_free_resources(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003452{
3453 u32 RssIds, IsrCount;
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303454 struct net_device *netdev = adapter->netdev;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003455 RssIds = SXG_RSS_CPU_COUNT(adapter);
3456 IsrCount = adapter->MsiEnabled ? RssIds : 1;
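	/*
	 * One ISR slot per RSS CPU when MSI is enabled, otherwise a single
	 * shared ISR; this mirrors how the Isr block freed below was sized.
	 */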
3457
3458 if (adapter->BasicAllocations == FALSE) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303459 /*
3460 * No allocations have been made, including spinlocks,
3461 * or listhead initializations. Return.
3462 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003463 return;
3464 }
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303465
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303466 /* Free Irq */
3467 free_irq(adapter->netdev->irq, netdev);
3468
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003469 if (!(IsListEmpty(&adapter->AllRcvBlocks))) {
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303470 sxg_free_rcvblocks(adapter);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003471 }
3472 if (!(IsListEmpty(&adapter->AllSglBuffers))) {
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303473 sxg_free_sgl_buffers(adapter);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003474 }
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303475
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003476 if (adapter->XmtRingZeroIndex) {
3477 pci_free_consistent(adapter->pcidev,
3478 sizeof(u32),
3479 adapter->XmtRingZeroIndex,
3480 adapter->PXmtRingZeroIndex);
3481 }
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303482 if (adapter->Isr) {
3483 pci_free_consistent(adapter->pcidev,
3484 sizeof(u32) * IsrCount,
3485 adapter->Isr, adapter->PIsr);
3486 }
3487
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303488 if (adapter->EventRings) {
3489 pci_free_consistent(adapter->pcidev,
3490 sizeof(struct sxg_event_ring) * RssIds,
3491 adapter->EventRings, adapter->PEventRings);
3492 }
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303493 if (adapter->RcvRings) {
3494 pci_free_consistent(adapter->pcidev,
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303495 sizeof(struct sxg_rcv_ring) * 1,
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303496 adapter->RcvRings,
3497 adapter->PRcvRings);
3498 adapter->RcvRings = NULL;
3499 }
3500
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303501 if(adapter->XmtRings) {
3502 pci_free_consistent(adapter->pcidev,
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303503 sizeof(struct sxg_xmt_ring) * 1,
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303504 adapter->XmtRings,
3505 adapter->PXmtRings);
3506 adapter->XmtRings = NULL;
3507 }
3508
	if (adapter->ucode_stats) {
		/* pci_unmap_single() takes the bus address before the length */
		pci_unmap_single(adapter->pcidev,
				 adapter->pucode_stats,
				 sizeof(struct sxg_ucode_stats),
				 PCI_DMA_FROMDEVICE);
		adapter->ucode_stats = NULL;
	}
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303515
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003516
J.R. Maurob243c4a2008-10-20 19:28:58 -04003517 /* Unmap register spaces */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303518 sxg_unmap_resources(adapter);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003519
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303520 sxg_free_mcast_addrs(adapter);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003521
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003522 adapter->BasicAllocations = FALSE;
3523
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003524}
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003525
3526/*
3527 * sxg_allocate_complete -
3528 *
3529 * This routine is called when a memory allocation has completed.
3530 *
3531 * Arguments -
J.R. Mauro73b07062008-10-28 18:42:02 -04003532 * struct adapter_t * - Our adapter structure
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003533 * VirtualAddress - Memory virtual address
3534 * PhysicalAddress - Memory physical address
3535 * Length - Length of memory allocated (or 0)
3536 * Context - The type of buffer allocated
3537 *
3538 * Return
3539 * None.
3540 */
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303541static int sxg_allocate_complete(struct adapter_t *adapter,
J.R. Mauro5c7514e2008-10-05 20:38:52 -04003542 void *VirtualAddress,
3543 dma_addr_t PhysicalAddress,
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303544 u32 Length, enum sxg_buffer_type Context)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003545{
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303546 int status = 0;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003547 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocCmp",
3548 adapter, VirtualAddress, Length, Context);
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303549 ASSERT(atomic_read(&adapter->pending_allocations));
3550 atomic_dec(&adapter->pending_allocations);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003551
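	/*
	 * Dispatch on the buffer type recorded when the allocation was
	 * requested: receive blocks are carved up and queued by
	 * sxg_allocate_rcvblock_complete(), SGL buffers are added to the
	 * adapter's SGL pool by sxg_allocate_sgl_buffer_complete().
	 */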
3552 switch (Context) {
3553
3554 case SXG_BUFFER_TYPE_RCV:
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303555 status = sxg_allocate_rcvblock_complete(adapter,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003556 VirtualAddress,
3557 PhysicalAddress, Length);
3558 break;
3559 case SXG_BUFFER_TYPE_SGL:
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303560 sxg_allocate_sgl_buffer_complete(adapter, (struct sxg_scatter_gather *)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003561 VirtualAddress,
3562 PhysicalAddress, Length);
3563 break;
3564 }
3565 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocCmp",
3566 adapter, VirtualAddress, Length, Context);
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303567
3568 return status;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003569}
3570
3571/*
3572 * sxg_allocate_buffer_memory - Shared memory allocation routine used for
3573 * synchronous and asynchronous buffer allocations
3574 *
3575 * Arguments -
3576 * adapter - A pointer to our adapter structure
3577 * Size - block size to allocate
3578 * BufferType - Type of buffer to allocate
3579 *
3580 * Return
3581 * int
3582 */
J.R. Mauro73b07062008-10-28 18:42:02 -04003583static int sxg_allocate_buffer_memory(struct adapter_t *adapter,
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303584 u32 Size, enum sxg_buffer_type BufferType)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003585{
3586 int status;
J.R. Mauro5c7514e2008-10-05 20:38:52 -04003587 void *Buffer;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003588 dma_addr_t pBuffer;
3589
3590 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocMem",
3591 adapter, Size, BufferType, 0);
	/*
	 * Track this allocation; pause processing waits for
	 * pending_allocations to drain back to zero.
	 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003597
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303598 atomic_inc(&adapter->pending_allocations);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003599
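	/*
	 * Receive blocks must be DMA-consistent memory (they are handed to
	 * the card by physical address); SGL buffers are plain kzalloc'd
	 * memory and get no bus address here, so pBuffer is left at 0.
	 */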
	if (BufferType != SXG_BUFFER_TYPE_SGL) {
		Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer);
	} else {
		Buffer = kzalloc(Size, GFP_ATOMIC);
		pBuffer = 0;	/* no DMA mapping for kzalloc'd SGL memory */
	}
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003606 if (Buffer == NULL) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303607 /*
3608 * Decrement the AllocationsPending count while holding
3609 * the lock. Pause processing relies on this
3610 */
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303611 atomic_dec(&adapter->pending_allocations);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003612 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1",
3613 adapter, Size, BufferType, 0);
3614 return (STATUS_RESOURCES);
3615 }
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303616 status = sxg_allocate_complete(adapter, Buffer, pBuffer, Size, BufferType);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003617
3618 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocMem",
3619 adapter, Size, BufferType, status);
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303620 return status;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003621}
3622
3623/*
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303624 * sxg_allocate_rcvblock_complete - Complete a receive descriptor
3625 * block allocation
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003626 *
3627 * Arguments -
3628 * adapter - A pointer to our adapter structure
3629 * RcvBlock - receive block virtual address
3630 * PhysicalAddress - Physical address
3631 * Length - Memory length
3632 *
3633 * Return
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003634 */
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303635static int sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
J.R. Mauro5c7514e2008-10-05 20:38:52 -04003636 void *RcvBlock,
3637 dma_addr_t PhysicalAddress,
3638 u32 Length)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003639{
3640 u32 i;
3641 u32 BufferSize = adapter->ReceiveBufferSize;
3642 u64 Paddr;
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303643 void *temp_RcvBlock;
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303644 struct sxg_rcv_block_hdr *RcvBlockHdr;
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303645 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
3646 struct sxg_rcv_descriptor_block *RcvDescriptorBlock;
3647 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003648
3649 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlRcvBlk",
3650 adapter, RcvBlock, Length, 0);
3651 if (RcvBlock == NULL) {
3652 goto fail;
3653 }
3654 memset(RcvBlock, 0, Length);
3655 ASSERT((BufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
3656 (BufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303657 ASSERT(Length == SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE));
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303658 /*
3659 * First, initialize the contained pool of receive data buffers.
3660 * This initialization requires NBL/NB/MDL allocations, if any of them
3661 * fail, free the block and return without queueing the shared memory
3662 */
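	/*
	 * The block is a single contiguous allocation: an array of
	 * SXG_RCV_DESCRIPTORS_PER_BLOCK data-buffer headers at the start,
	 * with the receive descriptor block, its header and the block header
	 * placed at the offsets given by the SXG_RCV_*_OFFSET() macros used
	 * below.
	 */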
3664 temp_RcvBlock = RcvBlock;
3665 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3666 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3667 RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *)
3668 temp_RcvBlock;
3669 /* For FREE macro assertion */
3670 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
3671 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize);
3672 if (RcvDataBufferHdr->SxgDumbRcvPacket == NULL)
3673 goto fail;
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303674
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303675 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003676
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303677 /*
3678 * Place this entire block of memory on the AllRcvBlocks queue so it
3679 * can be free later
3680 */
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303681
3682 RcvBlockHdr = (struct sxg_rcv_block_hdr *) ((unsigned char *)RcvBlock +
3683 SXG_RCV_BLOCK_HDR_OFFSET(SXG_RCV_DATA_HDR_SIZE));
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003684 RcvBlockHdr->VirtualAddress = RcvBlock;
3685 RcvBlockHdr->PhysicalAddress = PhysicalAddress;
3686 spin_lock(&adapter->RcvQLock);
3687 adapter->AllRcvBlockCount++;
3688 InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList);
3689 spin_unlock(&adapter->RcvQLock);
3690
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303691 /* Now free the contained receive data buffers that we
3692 * initialized above */
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303693 temp_RcvBlock = RcvBlock;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003694 for (i = 0, Paddr = PhysicalAddress;
3695 i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303696 i++, Paddr += SXG_RCV_DATA_HDR_SIZE,
3697 temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3698 RcvDataBufferHdr =
3699 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003700 spin_lock(&adapter->RcvQLock);
3701 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
3702 spin_unlock(&adapter->RcvQLock);
3703 }
3704
J.R. Maurob243c4a2008-10-20 19:28:58 -04003705 /* Locate the descriptor block and put it on a separate free queue */
J.R. Mauro5c7514e2008-10-05 20:38:52 -04003706 RcvDescriptorBlock =
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303707 (struct sxg_rcv_descriptor_block *) ((unsigned char *)RcvBlock +
J.R. Mauro5c7514e2008-10-05 20:38:52 -04003708 SXG_RCV_DESCRIPTOR_BLOCK_OFFSET
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303709 (SXG_RCV_DATA_HDR_SIZE));
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003710 RcvDescriptorBlockHdr =
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303711 (struct sxg_rcv_descriptor_block_hdr *) ((unsigned char *)RcvBlock +
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003712 SXG_RCV_DESCRIPTOR_BLOCK_HDR_OFFSET
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303713 (SXG_RCV_DATA_HDR_SIZE));
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003714 RcvDescriptorBlockHdr->VirtualAddress = RcvDescriptorBlock;
3715 RcvDescriptorBlockHdr->PhysicalAddress = Paddr;
3716 spin_lock(&adapter->RcvQLock);
3717 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, RcvDescriptorBlockHdr);
3718 spin_unlock(&adapter->RcvQLock);
3719 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlRBlk",
3720 adapter, RcvBlock, Length, 0);
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303721 return STATUS_SUCCESS;
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303722fail:
J.R. Maurob243c4a2008-10-20 19:28:58 -04003723 /* Free any allocated resources */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003724 if (RcvBlock) {
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303725 temp_RcvBlock = RcvBlock;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003726 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303727 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003728 RcvDataBufferHdr =
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303729 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003730 SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
3731 }
3732 pci_free_consistent(adapter->pcidev,
3733 Length, RcvBlock, PhysicalAddress);
3734 }
Harvey Harrisone88bd232008-10-17 14:46:10 -07003735 DBG_ERROR("%s: OUT OF RESOURCES\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003736 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "RcvAFail",
3737 adapter, adapter->FreeRcvBufferCount,
3738 adapter->FreeRcvBlockCount, adapter->AllRcvBlockCount);
3739 adapter->Stats.NoMem++;
	/*
	 * The allocation failed.  Blocks allocated earlier remain on
	 * AllRcvBlocks and are released later by sxg_free_rcvblocks().
	 */
3742
3743 return STATUS_RESOURCES;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003744}
3745
3746/*
3747 * sxg_allocate_sgl_buffer_complete - Complete a SGL buffer allocation
3748 *
3749 * Arguments -
3750 * adapter - A pointer to our adapter structure
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303751 * SxgSgl - struct sxg_scatter_gather buffer
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003752 * PhysicalAddress - Physical address
3753 * Length - Memory length
3754 *
3755 * Return
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003756 */
J.R. Mauro73b07062008-10-28 18:42:02 -04003757static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303758 struct sxg_scatter_gather *SxgSgl,
J.R. Mauro5c7514e2008-10-05 20:38:52 -04003759 dma_addr_t PhysicalAddress,
3760 u32 Length)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003761{
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303762 unsigned long sgl_flags;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003763 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlSglCmp",
3764 adapter, SxgSgl, Length, 0);
Mithlesh Thukralc5e5cf52009-02-06 19:31:40 +05303765 spin_lock_irqsave(&adapter->SglQLock, sgl_flags);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003766 adapter->AllSglBufferCount++;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303767 /* PhysicalAddress; */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303768 SxgSgl->PhysicalAddress = PhysicalAddress;
3769 /* Initialize backpointer once */
3770 SxgSgl->adapter = adapter;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003771 InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList);
Mithlesh Thukralc5e5cf52009-02-06 19:31:40 +05303772 spin_unlock_irqrestore(&adapter->SglQLock, sgl_flags);
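	/*
	 * Mark the new SGL busy, then immediately hand it to
	 * SXG_FREE_SGL_BUFFER so it becomes available for reuse.
	 */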
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003773 SxgSgl->State = SXG_BUFFER_BUSY;
Mithlesh Thukralc5e5cf52009-02-06 19:31:40 +05303774 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003775 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlSgl",
3776 adapter, SxgSgl, Length, 0);
3777}
3778
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003779
Mithlesh Thukral54aed112009-01-19 20:27:17 +05303780static int sxg_adapter_set_hwaddr(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003781{
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303782 /*
3783 * DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] \
3784 * funct#[%d]\n", __func__, card->config_set,
3785 * adapter->port, adapter->physport, adapter->functionnumber);
3786 *
3787 * sxg_dbg_macaddrs(adapter);
3788 */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303789 /* DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n",
3790 * __FUNCTION__);
3791 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003792
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303793 /* sxg_dbg_macaddrs(adapter); */
3794
	struct net_device *dev = adapter->netdev;

	if (!dev) {
		printk(KERN_ERR "sxg: %s: netdev is NULL\n", __func__);
		return -ENODEV;
	}

	DBG_ERROR("%s ENTER (%s)\n", __func__, dev->name);

	if (netif_running(dev)) {
		return -EBUSY;
	}
3809
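	/* If currmacaddr is still all zeros, initialize it from adapter->macaddr */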
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003810 if (!(adapter->currmacaddr[0] ||
3811 adapter->currmacaddr[1] ||
3812 adapter->currmacaddr[2] ||
3813 adapter->currmacaddr[3] ||
3814 adapter->currmacaddr[4] || adapter->currmacaddr[5])) {
3815 memcpy(adapter->currmacaddr, adapter->macaddr, 6);
3816 }
3817 if (adapter->netdev) {
3818 memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05303819 memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003820 }
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303821 /* DBG_ERROR ("%s EXIT port %d\n", __func__, adapter->port); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003822 sxg_dbg_macaddrs(adapter);
3823
Mithlesh Thukral54aed112009-01-19 20:27:17 +05303824 return 0;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003825}
3826
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003827#if XXXTODO
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303828static int sxg_mac_set_address(struct net_device *dev, void *ptr)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003829{
J.R. Mauro73b07062008-10-28 18:42:02 -04003830 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003831 struct sockaddr *addr = ptr;
3832
Harvey Harrisone88bd232008-10-17 14:46:10 -07003833 DBG_ERROR("%s ENTER (%s)\n", __func__, adapter->netdev->name);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003834
3835 if (netif_running(dev)) {
3836 return -EBUSY;
3837 }
3838 if (!adapter) {
3839 return -EBUSY;
3840 }
3841 DBG_ERROR("sxg: %s (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07003842 __func__, adapter->netdev->name, adapter->currmacaddr[0],
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003843 adapter->currmacaddr[1], adapter->currmacaddr[2],
3844 adapter->currmacaddr[3], adapter->currmacaddr[4],
3845 adapter->currmacaddr[5]);
3846 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3847 memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len);
3848 DBG_ERROR("sxg: %s (%s) new %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07003849 __func__, adapter->netdev->name, adapter->currmacaddr[0],
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003850 adapter->currmacaddr[1], adapter->currmacaddr[2],
3851 adapter->currmacaddr[3], adapter->currmacaddr[4],
3852 adapter->currmacaddr[5]);
3853
3854 sxg_config_set(adapter, TRUE);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003855 return 0;
3856}
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003857#endif
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003858
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003859/*
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303860 * SXG DRIVER FUNCTIONS (below)
3861 *
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003862 * sxg_initialize_adapter - Initialize adapter
3863 *
3864 * Arguments -
3865 * adapter - A pointer to our adapter structure
3866 *
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303867 * Return - int
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003868 */
J.R. Mauro73b07062008-10-28 18:42:02 -04003869static int sxg_initialize_adapter(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003870{
3871 u32 RssIds, IsrCount;
3872 u32 i;
3873 int status;
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05303874 int sxg_rcv_ring_size = SXG_RCV_RING_SIZE;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003875
3876 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitAdpt",
3877 adapter, 0, 0, 0);
3878
J.R. Maurob243c4a2008-10-20 19:28:58 -04003879 RssIds = 1; /* XXXTODO SXG_RSS_CPU_COUNT(adapter); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003880 IsrCount = adapter->MsiEnabled ? RssIds : 1;
3881
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303882 /*
3883 * Sanity check SXG_UCODE_REGS structure definition to
3884 * make sure the length is correct
3885 */
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303886 ASSERT(sizeof(struct sxg_ucode_regs) == SXG_REGISTER_SIZE_PER_CPU);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003887
J.R. Maurob243c4a2008-10-20 19:28:58 -04003888 /* Disable interrupts */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003889 SXG_DISABLE_ALL_INTERRUPTS(adapter);
3890
J.R. Maurob243c4a2008-10-20 19:28:58 -04003891 /* Set MTU */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003892 ASSERT((adapter->FrameSize == ETHERMAXFRAME) ||
3893 (adapter->FrameSize == JUMBOMAXFRAME));
3894 WRITE_REG(adapter->UcodeRegs[0].LinkMtu, adapter->FrameSize, TRUE);
3895
J.R. Maurob243c4a2008-10-20 19:28:58 -04003896 /* Set event ring base address and size */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003897 WRITE_REG64(adapter,
3898 adapter->UcodeRegs[0].EventBase, adapter->PEventRings, 0);
3899 WRITE_REG(adapter->UcodeRegs[0].EventSize, EVENT_RING_SIZE, TRUE);
3900
J.R. Maurob243c4a2008-10-20 19:28:58 -04003901 /* Per-ISR initialization */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003902 for (i = 0; i < IsrCount; i++) {
3903 u64 Addr;
J.R. Maurob243c4a2008-10-20 19:28:58 -04003904 /* Set interrupt status pointer */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003905 Addr = adapter->PIsr + (i * sizeof(u32));
3906 WRITE_REG64(adapter, adapter->UcodeRegs[i].Isp, Addr, i);
3907 }
3908
J.R. Maurob243c4a2008-10-20 19:28:58 -04003909 /* XMT ring zero index */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003910 WRITE_REG64(adapter,
3911 adapter->UcodeRegs[0].SPSendIndex,
3912 adapter->PXmtRingZeroIndex, 0);
3913
J.R. Maurob243c4a2008-10-20 19:28:58 -04003914 /* Per-RSS initialization */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003915 for (i = 0; i < RssIds; i++) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04003916 /* Release all event ring entries to the Microcode */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003917 WRITE_REG(adapter->UcodeRegs[i].EventRelease, EVENT_RING_SIZE,
3918 TRUE);
3919 }
3920
J.R. Maurob243c4a2008-10-20 19:28:58 -04003921 /* Transmit ring base and size */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003922 WRITE_REG64(adapter,
3923 adapter->UcodeRegs[0].XmtBase, adapter->PXmtRings, 0);
3924 WRITE_REG(adapter->UcodeRegs[0].XmtSize, SXG_XMT_RING_SIZE, TRUE);
3925
J.R. Maurob243c4a2008-10-20 19:28:58 -04003926 /* Receive ring base and size */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003927 WRITE_REG64(adapter,
3928 adapter->UcodeRegs[0].RcvBase, adapter->PRcvRings, 0);
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05303929 if (adapter->JumboEnabled == TRUE)
3930 sxg_rcv_ring_size = SXG_JUMBO_RCV_RING_SIZE;
3931 WRITE_REG(adapter->UcodeRegs[0].RcvSize, sxg_rcv_ring_size, TRUE);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003932
J.R. Maurob243c4a2008-10-20 19:28:58 -04003933 /* Populate the card with receive buffers */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003934 sxg_stock_rcv_buffers(adapter);
3935
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303936 /*
3937 * Initialize checksum offload capabilities. At the moment we always
3938 * enable IP and TCP receive checksums on the card. Depending on the
3939 * checksum configuration specified by the user, we can choose to
3940 * report or ignore the checksum information provided by the card.
3941 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003942 WRITE_REG(adapter->UcodeRegs[0].ReceiveChecksum,
3943 SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED, TRUE);
3944
J.R. Maurob243c4a2008-10-20 19:28:58 -04003945 /* Initialize the MAC, XAUI */
Harvey Harrisone88bd232008-10-17 14:46:10 -07003946 DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003947 status = sxg_initialize_link(adapter);
Harvey Harrisone88bd232008-10-17 14:46:10 -07003948 DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __func__,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003949 status);
3950 if (status != STATUS_SUCCESS) {
3951 return (status);
3952 }
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303953 /*
3954 * Initialize Dead to FALSE.
3955 * SlicCheckForHang or SlicDumpThread will take it from here.
3956 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003957 adapter->Dead = FALSE;
3958 adapter->PingOutstanding = FALSE;
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05303959 adapter->State = SXG_STATE_RUNNING;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003960
3961 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInit",
3962 adapter, 0, 0, 0);
3963 return (STATUS_SUCCESS);
3964}
3965
3966/*
3967 * sxg_fill_descriptor_block - Populate a descriptor block and give it to
3968 * the card. The caller should hold the RcvQLock
3969 *
3970 * Arguments -
3971 * adapter - A pointer to our adapter structure
3972 * RcvDescriptorBlockHdr - Descriptor block to fill
3973 *
3974 * Return
3975 * status
3976 */
J.R. Mauro73b07062008-10-28 18:42:02 -04003977static int sxg_fill_descriptor_block(struct adapter_t *adapter,
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303978 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003979{
3980 u32 i;
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303981 struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo;
3982 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
3983 struct sxg_rcv_descriptor_block *RcvDescriptorBlock;
3984 struct sxg_cmd *RingDescriptorCmd;
3985 struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0];
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003986
3987 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "FilBlk",
3988 adapter, adapter->RcvBuffersOnCard,
3989 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
3990
3991 ASSERT(RcvDescriptorBlockHdr);
3992
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303993 /*
3994 * If we don't have the resources to fill the descriptor block,
3995 * return failure
3996 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003997 if ((adapter->FreeRcvBufferCount < SXG_RCV_DESCRIPTORS_PER_BLOCK) ||
3998 SXG_RING_FULL(RcvRingInfo)) {
3999 adapter->Stats.NoMem++;
4000 return (STATUS_FAILURE);
4001 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04004002 /* Get a ring descriptor command */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004003 SXG_GET_CMD(RingZero,
4004 RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr);
4005 ASSERT(RingDescriptorCmd);
4006 RcvDescriptorBlockHdr->State = SXG_BUFFER_ONCARD;
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05304007 RcvDescriptorBlock = (struct sxg_rcv_descriptor_block *)
4008 RcvDescriptorBlockHdr->VirtualAddress;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004009
J.R. Maurob243c4a2008-10-20 19:28:58 -04004010 /* Fill in the descriptor block */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004011 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) {
4012 SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
4013 ASSERT(RcvDataBufferHdr);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05304015 if (!RcvDataBufferHdr->SxgDumbRcvPacket) {
4016 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr,
4017 adapter->ReceiveBufferSize);
4018 if(RcvDataBufferHdr->skb)
4019 RcvDataBufferHdr->SxgDumbRcvPacket =
4020 RcvDataBufferHdr->skb;
4021 else
4022 goto no_memory;
4023 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004024 SXG_REINIATIALIZE_PACKET(RcvDataBufferHdr->SxgDumbRcvPacket);
4025 RcvDataBufferHdr->State = SXG_BUFFER_ONCARD;
J.R. Mauro5c7514e2008-10-05 20:38:52 -04004026 RcvDescriptorBlock->Descriptors[i].VirtualAddress =
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05304027 (void *)RcvDataBufferHdr;
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05304028
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004029 RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
4030 RcvDataBufferHdr->PhysicalAddress;
4031 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04004032 /* Add the descriptor block to receive descriptor ring 0 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004033 RingDescriptorCmd->Sgl = RcvDescriptorBlockHdr->PhysicalAddress;
4034
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05304035 /*
4036 * RcvBuffersOnCard is not protected via the receive lock (see
4037 * sxg_process_event_queue) We don't want to grap a lock every time a
4038 * buffer is returned to us, so we use atomic interlocked functions
4039 * instead.
4040 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004041 adapter->RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK;
4042
4043 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DscBlk",
4044 RcvDescriptorBlockHdr,
4045 RingDescriptorCmd, RcvRingInfo->Head, RcvRingInfo->Tail);
4046
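	/* Notify the microcode that another descriptor block has been posted */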
4047 WRITE_REG(adapter->UcodeRegs[0].RcvCmd, 1, true);
4048 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlk",
4049 adapter, adapter->RcvBuffersOnCard,
4050 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
4051 return (STATUS_SUCCESS);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05304052no_memory:
4053 return (-ENOMEM);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004054}
4055
4056/*
4057 * sxg_stock_rcv_buffers - Stock the card with receive buffers
4058 *
4059 * Arguments -
4060 * adapter - A pointer to our adapter structure
4061 *
4062 * Return
4063 * None
4064 */
J.R. Mauro73b07062008-10-28 18:42:02 -04004065static void sxg_stock_rcv_buffers(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004066{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05304067 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05304068 int sxg_rcv_data_buffers = SXG_RCV_DATA_BUFFERS;
4069 int sxg_min_rcv_data_buffers = SXG_MIN_RCV_DATA_BUFFERS;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004070
4071 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf",
4072 adapter, adapter->RcvBuffersOnCard,
4073 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05304074 /*
4075 * First, see if we've got less than our minimum threshold of
4076 * receive buffers, there isn't an allocation in progress, and
4077 * we haven't exceeded our maximum.. get another block of buffers
4078 * None of this needs to be SMP safe. It's round numbers.
4079 */
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05304080 if (adapter->JumboEnabled == TRUE)
4081 sxg_min_rcv_data_buffers = SXG_MIN_JUMBO_RCV_DATA_BUFFERS;
4082 if ((adapter->FreeRcvBufferCount < sxg_min_rcv_data_buffers) &&
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004083 (adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) &&
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05304084 (atomic_read(&adapter->pending_allocations) == 0)) {
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004085 sxg_allocate_buffer_memory(adapter,
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05304086 SXG_RCV_BLOCK_SIZE
4087 (SXG_RCV_DATA_HDR_SIZE),
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004088 SXG_BUFFER_TYPE_RCV);
4089 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04004090 /* Now grab the RcvQLock lock and proceed */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004091 spin_lock(&adapter->RcvQLock);
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05304092 if (adapter->JumboEnabled)
4093 sxg_rcv_data_buffers = SXG_JUMBO_RCV_DATA_BUFFERS;
4094 while (adapter->RcvBuffersOnCard < sxg_rcv_data_buffers) {
Mithlesh Thukral942798b2009-01-05 21:14:34 +05304095 struct list_entry *_ple;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004096
J.R. Maurob243c4a2008-10-20 19:28:58 -04004097 /* Get a descriptor block */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004098 RcvDescriptorBlockHdr = NULL;
4099 if (adapter->FreeRcvBlockCount) {
4100 _ple = RemoveHeadList(&adapter->FreeRcvBlocks);
J.R. Mauro5c7514e2008-10-05 20:38:52 -04004101 RcvDescriptorBlockHdr =
Mithlesh Thukral942798b2009-01-05 21:14:34 +05304102 container_of(_ple, struct sxg_rcv_descriptor_block_hdr,
J.R. Mauro5c7514e2008-10-05 20:38:52 -04004103 FreeList);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004104 adapter->FreeRcvBlockCount--;
4105 RcvDescriptorBlockHdr->State = SXG_BUFFER_BUSY;
4106 }
4107
4108 if (RcvDescriptorBlockHdr == NULL) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04004109 /* Bail out.. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004110 adapter->Stats.NoMem++;
4111 break;
4112 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04004113 /* Fill in the descriptor block and give it to the card */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004114 if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
4115 STATUS_FAILURE) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04004116 /* Free the descriptor block */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004117 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
4118 RcvDescriptorBlockHdr);
4119 break;
4120 }
4121 }
4122 spin_unlock(&adapter->RcvQLock);
4123 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlks",
4124 adapter, adapter->RcvBuffersOnCard,
4125 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
4126}
4127
4128/*
4129 * sxg_complete_descriptor_blocks - Return descriptor blocks that have been
4130 * completed by the microcode
4131 *
4132 * Arguments -
4133 * adapter - A pointer to our adapter structure
4134 * Index - Where the microcode is up to
4135 *
4136 * Return
4137 * None
4138 */
J.R. Mauro73b07062008-10-28 18:42:02 -04004139static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
J.R. Mauro5c7514e2008-10-05 20:38:52 -04004140 unsigned char Index)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004141{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05304142 struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0];
4143 struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo;
4144 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
4145 struct sxg_cmd *RingDescriptorCmd;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004146
4147 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlks",
4148 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
4149
J.R. Maurob243c4a2008-10-20 19:28:58 -04004150 /* Now grab the RcvQLock lock and proceed */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004151 spin_lock(&adapter->RcvQLock);
4152 ASSERT(Index != RcvRingInfo->Tail);
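	/*
	 * Walk the ring from our tail toward the microcode's index, stopping
	 * while more than three entries remain between them.
	 */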
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05304153 while (sxg_ring_get_forward_diff(RcvRingInfo, Index,
4154 RcvRingInfo->Tail) > 3) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05304155 /*
4156 * Locate the current Cmd (ring descriptor entry), and
4157 * associated receive descriptor block, and advance
4158 * the tail
4159 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004160 SXG_RETURN_CMD(RingZero,
4161 RcvRingInfo,
4162 RingDescriptorCmd, RcvDescriptorBlockHdr);
4163 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlk",
4164 RcvRingInfo->Head, RcvRingInfo->Tail,
4165 RingDescriptorCmd, RcvDescriptorBlockHdr);
4166
J.R. Maurob243c4a2008-10-20 19:28:58 -04004167 /* Clear the SGL field */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004168 RingDescriptorCmd->Sgl = 0;
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05304169 /*
4170 * Attempt to refill it and hand it right back to the
4171 * card. If we fail to refill it, free the descriptor block
4172 * header. The card will be restocked later via the
4173 * RcvBuffersOnCard test
4174 */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05304175 if (sxg_fill_descriptor_block(adapter,
4176 RcvDescriptorBlockHdr) == STATUS_FAILURE)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004177 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
4178 RcvDescriptorBlockHdr);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004179 }
4180 spin_unlock(&adapter->RcvQLock);
4181 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XCRBlks",
4182 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
4183}
4184
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05304185/*
4186 * Read the statistics which the card has been maintaining.
4187 */
4188void sxg_collect_statistics(struct adapter_t *adapter)
4189{
	if (adapter->ucode_stats) {
		WRITE_REG64(adapter, adapter->UcodeRegs[0].GetUcodeStats,
			    adapter->pucode_stats, 0);
		adapter->stats.rx_fifo_errors = adapter->ucode_stats->ERDrops;
		adapter->stats.rx_over_errors = adapter->ucode_stats->NBDrops;
		adapter->stats.tx_fifo_errors = adapter->ucode_stats->XDrops;
	}
4196}
4197
4198static struct net_device_stats *sxg_get_stats(struct net_device * dev)
4199{
4200 struct adapter_t *adapter = netdev_priv(dev);
4201
4202 sxg_collect_statistics(adapter);
4203 return (&adapter->stats);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05304204}
4205
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004206static struct pci_driver sxg_driver = {
Mithlesh Thukral371d7a92009-01-19 20:22:34 +05304207 .name = sxg_driver_name,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004208 .id_table = sxg_pci_tbl,
4209 .probe = sxg_entry_probe,
4210 .remove = sxg_entry_remove,
4211#if SXG_POWER_MANAGEMENT_ENABLED
4212 .suspend = sxgpm_suspend,
4213 .resume = sxgpm_resume,
4214#endif
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05304215 /* .shutdown = slic_shutdown, MOOK_INVESTIGATE */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004216};
4217
4218static int __init sxg_module_init(void)
4219{
4220 sxg_init_driver();
4221
4222 if (debug >= 0)
4223 sxg_debug = debug;
4224
4225 return pci_register_driver(&sxg_driver);
4226}
4227
4228static void __exit sxg_module_cleanup(void)
4229{
4230 pci_unregister_driver(&sxg_driver);
4231}
4232
4233module_init(sxg_module_init);
4234module_exit(sxg_module_cleanup);