/**************************************************************************
 *
 * Copyright (C) 2000-2008 Alacritech, Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of Alacritech, Inc.
 *
 * Parts developed by LinSysSoft Sahara team
 *
 **************************************************************************/

/*
 * FILENAME: sxg.c
 *
 * The SXG driver for Alacritech's 10Gbe products.
 *
 * NOTE: This is the standard, non-accelerated version of Alacritech's
 *       IS-NIC driver.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>

#define SLIC_GET_STATS_ENABLED		0
#define LINUX_FREES_ADAPTER_RESOURCES	1
#define SXG_OFFLOAD_IP_CHECKSUM		0
#define SXG_POWER_MANAGEMENT_ENABLED	0
#define VPCI				0
#define ATK_DEBUG			1

#include "sxg_os.h"
#include "sxghw.h"
#include "sxghif.h"
#include "sxg.h"
#include "sxgdbg.h"

#include "sxgphycode.h"
#define SXG_UCODE_DBG 0		/* Turn on for debugging */
#if SXG_UCODE_DBG
#include "saharadbgdownload.c"
#include "saharadbgdownloadB.c"
#else
#include "saharadownload.c"
#include "saharadownloadB.c"
#endif

static int sxg_allocate_buffer_memory(struct adapter_t *adapter, u32 Size,
				      enum sxg_buffer_type BufferType);
static int sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
					  void *RcvBlock,
					  dma_addr_t PhysicalAddress,
					  u32 Length);
static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
					     struct sxg_scatter_gather *SxgSgl,
					     dma_addr_t PhysicalAddress,
					     u32 Length);

static void sxg_mcast_init_crc32(void);
static int sxg_entry_open(struct net_device *dev);
static int sxg_second_open(struct net_device *dev);
static int sxg_entry_halt(struct net_device *dev);
static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev);
static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb);
static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
			struct sxg_scatter_gather *SxgSgl);

static void sxg_handle_interrupt(struct adapter_t *adapter, int *work_done,
				 int budget);
static void sxg_interrupt(struct adapter_t *adapter);
static int sxg_poll(struct napi_struct *napi, int budget);
static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId);
static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId,
				   int *sxg_napi_continue, int *work_done,
				   int budget);
static void sxg_complete_slow_send(struct adapter_t *adapter, int irq_context);
static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
					struct sxg_event *Event);
static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus);
static bool sxg_mac_filter(struct adapter_t *adapter,
			   struct ether_header *EtherHdr, ushort length);
static struct net_device_stats *sxg_get_stats(struct net_device *dev);
void sxg_free_resources(struct adapter_t *adapter);
void sxg_free_rcvblocks(struct adapter_t *adapter);
void sxg_free_sgl_buffers(struct adapter_t *adapter);
void sxg_unmap_resources(struct adapter_t *adapter);
void sxg_free_mcast_addrs(struct adapter_t *adapter);
void sxg_collect_statistics(struct adapter_t *adapter);

#define XXXTODO 0

#if XXXTODO
static int sxg_mac_set_address(struct net_device *dev, void *ptr);
#endif
static void sxg_mcast_set_list(struct net_device *dev);

static int sxg_adapter_set_hwaddr(struct adapter_t *adapter);

static int sxg_initialize_adapter(struct adapter_t *adapter);
static void sxg_stock_rcv_buffers(struct adapter_t *adapter);
static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
					   unsigned char Index);
static int sxg_initialize_link(struct adapter_t *adapter);
static int sxg_phy_init(struct adapter_t *adapter);
static void sxg_link_event(struct adapter_t *adapter);
static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter);
static void sxg_link_state(struct adapter_t *adapter,
			   enum SXG_LINK_STATE LinkState);
static int sxg_write_mdio_reg(struct adapter_t *adapter,
			      u32 DevAddr, u32 RegAddr, u32 Value);
static int sxg_read_mdio_reg(struct adapter_t *adapter,
			     u32 DevAddr, u32 RegAddr, u32 *pValue);
static void sxg_set_mcast_addr(struct adapter_t *adapter);

static unsigned int sxg_first_init = 1;
static char *sxg_banner =
    "Alacritech SLIC Technology(tm) Server and Storage "
    "10Gbe Accelerator (Non-Accelerated)\n";

static int sxg_debug = 1;
static int debug = -1;
static struct net_device *head_netdevice = NULL;

static struct sxgbase_driver sxg_global = {
	.dynamic_intagg = 1,
};
static int intagg_delay = 100;
static u32 dynamic_intagg = 0;

char sxg_driver_name[] = "sxg_nic";
#define DRV_AUTHOR	"Alacritech, Inc. Engineering"
#define DRV_DESCRIPTION \
	"Alacritech SLIC Technology(tm) Non-Accelerated 10Gbe Driver"
#define DRV_COPYRIGHT \
	"Copyright 2000-2008 Alacritech, Inc.  All rights reserved."

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");

module_param(dynamic_intagg, int, 0);
MODULE_PARM_DESC(dynamic_intagg, "Dynamic Interrupt Aggregation Setting");
module_param(intagg_delay, int, 0);
MODULE_PARM_DESC(intagg_delay, "uSec Interrupt Aggregation Delay");

static struct pci_device_id sxg_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(SXG_VENDOR_ID, SXG_DEVICE_ID)},
	{0,}
};

MODULE_DEVICE_TABLE(pci, sxg_pci_tbl);

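/*
 * Register access helper.  sxg_reg32_write() issues a 32-bit MMIO write;
 * when 'flush' is set it follows the write with a memory barrier so the
 * write is ordered ahead of whatever the caller does next.  (Note that
 * mb() orders CPU accesses; it does not by itself flush a PCI posted
 * write.)
 */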
static inline void sxg_reg32_write(void __iomem *reg, u32 value, bool flush)
{
	writel(value, reg);
	if (flush)
		mb();
}

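/*
 * sxg_reg64_write() emulates an atomic 64-bit register write.  The high
 * 32 bits go to the per-CPU 'Upper' ucode register and the low 32 bits to
 * the target register, under Bit64RegLock so two writers cannot interleave
 * their halves.  (The hardware presumably latches 'Upper' and applies it
 * when the low half arrives - an assumption based on the code structure.)
 */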
static inline void sxg_reg64_write(struct adapter_t *adapter, void __iomem *reg,
				   u64 value, u32 cpu)
{
	u32 value_high = (u32) (value >> 32);
	u32 value_low = (u32) (value & 0x00000000FFFFFFFF);
	unsigned long flags;

	spin_lock_irqsave(&adapter->Bit64RegLock, flags);
	writel(value_high, (void __iomem *)(&adapter->UcodeRegs[cpu].Upper));
	writel(value_low, reg);
	spin_unlock_irqrestore(&adapter->Bit64RegLock, flags);
}

static void sxg_init_driver(void)
{
	if (sxg_first_init) {
		DBG_ERROR("sxg: %s sxg_first_init set jiffies[%lx]\n",
			  __func__, jiffies);
		sxg_first_init = 0;
		spin_lock_init(&sxg_global.driver_lock);
	}
}

static void sxg_dbg_macaddrs(struct adapter_t *adapter)
{
	DBG_ERROR("  (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
		  adapter->netdev->name, adapter->currmacaddr[0],
		  adapter->currmacaddr[1], adapter->currmacaddr[2],
		  adapter->currmacaddr[3], adapter->currmacaddr[4],
		  adapter->currmacaddr[5]);
	DBG_ERROR("  (%s) mac  %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
		  adapter->netdev->name, adapter->macaddr[0],
		  adapter->macaddr[1], adapter->macaddr[2],
		  adapter->macaddr[3], adapter->macaddr[4],
		  adapter->macaddr[5]);
	return;
}

/* SXG Globals */
static struct sxg_driver SxgDriver;

#ifdef ATKDBG
static struct sxg_trace_buffer LSxgTraceBuffer;
#endif /* ATKDBG */
static struct sxg_trace_buffer *SxgTraceBuffer = NULL;

/*
 * sxg_download_microcode
 *
 * Download Microcode to Sahara adapter
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	UcodeSel	- microcode file selection
 *
 * Return
 *	bool - TRUE on success, FALSE on failure
 */
static bool sxg_download_microcode(struct adapter_t *adapter,
				   enum SXG_UCODE_SEL UcodeSel)
{
	struct sxg_hw_regs *HwRegs = adapter->HwRegs;
	u32 Section;
	u32 ThisSectionSize;
	u32 *Instruction = NULL;
	u32 BaseAddress, AddressOffset, Address;
	/* u32 Failure; */
	u32 ValueRead;
	u32 i;
	u32 numSections = 0;
	u32 sectionSize[16];
	u32 sectionStart[16];

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DnldUcod",
		  adapter, 0, 0, 0);
	DBG_ERROR("sxg: %s ENTER\n", __func__);

	switch (UcodeSel) {
	case SXG_UCODE_SAHARA:	/* Sahara operational ucode */
		numSections = SNumSections;
		for (i = 0; i < numSections; i++) {
			sectionSize[i] = SSectionSize[i];
			sectionStart[i] = SSectionStart[i];
		}
		break;
	default:
		printk(KERN_ERR KBUILD_MODNAME
		       ": Woah, big error with the microcode!\n");
		break;
	}

	DBG_ERROR("sxg: RESET THE CARD\n");
	/* First, reset the card */
	WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);

	/*
	 * Download each section of the microcode as specified in
	 * its download file.  The *download.c file is generated using
	 * the saharaobjtoc facility which converts the metastep .obj
	 * file to a .c file which contains a two-dimensional array.
	 */
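	/*
	 * Each 96-bit instruction occupies 12 bytes, i.e. three consecutive
	 * u32s in that array:
	 *
	 *	Instruction[0] -> bits 31-0   (UcodeDataLow)
	 *	Instruction[1] -> bits 63-32  (UcodeDataMiddle)
	 *	Instruction[2] -> bits 95-64  (UcodeDataHigh)
	 *
	 * which is why ThisSectionSize below divides the byte count by 12
	 * and the Instruction pointer advances by 3 after each write.
	 */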
	for (Section = 0; Section < numSections; Section++) {
		DBG_ERROR("sxg: SECTION # %d\n", Section);
		switch (UcodeSel) {
		case SXG_UCODE_SAHARA:
			Instruction = (u32 *)&SaharaUCode[Section][0];
			break;
		default:
			ASSERT(0);
			break;
		}
		BaseAddress = sectionStart[Section];
		/* Size in instructions */
		ThisSectionSize = sectionSize[Section] / 12;
		for (AddressOffset = 0; AddressOffset < ThisSectionSize;
		     AddressOffset++) {
			Address = BaseAddress + AddressOffset;
			ASSERT((Address & ~MICROCODE_ADDRESS_MASK) == 0);
			/* Write instruction bits 31 - 0 */
			WRITE_REG(HwRegs->UcodeDataLow, *Instruction, FLUSH);
			/* Write instruction bits 63-32 */
			WRITE_REG(HwRegs->UcodeDataMiddle, *(Instruction + 1),
				  FLUSH);
			/* Write instruction bits 95-64 */
			WRITE_REG(HwRegs->UcodeDataHigh, *(Instruction + 2),
				  FLUSH);
			/* Write instruction address with the WRITE bit set */
			WRITE_REG(HwRegs->UcodeAddr,
				  (Address | MICROCODE_ADDRESS_WRITE), FLUSH);
			/*
			 * Sahara bug in the ucode download logic - the write
			 * to DataLow for the next instruction could get
			 * corrupted.  To avoid this, write to DataLow again
			 * for this instruction (which may get corrupted, but
			 * it doesn't matter), then increment the address and
			 * write the data for the next instruction to DataLow.
			 * That write should succeed.
			 */
			WRITE_REG(HwRegs->UcodeDataLow, *Instruction, TRUE);
			/* Advance 3 u32s to start of next instruction */
			Instruction += 3;
		}
	}
	/*
	 * Now repeat the entire operation reading the instruction back and
	 * checking for parity errors
	 */
	for (Section = 0; Section < numSections; Section++) {
		DBG_ERROR("sxg: check SECTION # %d\n", Section);
		switch (UcodeSel) {
		case SXG_UCODE_SAHARA:
			Instruction = (u32 *)&SaharaUCode[Section][0];
			break;
		default:
			ASSERT(0);
			break;
		}
		BaseAddress = sectionStart[Section];
		/* Size in instructions */
		ThisSectionSize = sectionSize[Section] / 12;
		for (AddressOffset = 0; AddressOffset < ThisSectionSize;
		     AddressOffset++) {
			Address = BaseAddress + AddressOffset;
			/* Write the address with the READ bit set */
			WRITE_REG(HwRegs->UcodeAddr,
				  (Address | MICROCODE_ADDRESS_READ), FLUSH);
			/* Read it back and check parity bit. */
			READ_REG(HwRegs->UcodeAddr, ValueRead);
			if (ValueRead & MICROCODE_ADDRESS_PARITY) {
				DBG_ERROR("sxg: %s PARITY ERROR\n",
					  __func__);

				return FALSE;	/* Parity error */
			}
			ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address);
			/* Read the instruction back and compare */
			READ_REG(HwRegs->UcodeDataLow, ValueRead);
			if (ValueRead != *Instruction) {
				DBG_ERROR("sxg: %s MISCOMPARE LOW\n",
					  __func__);
				return FALSE;	/* Miscompare */
			}
			READ_REG(HwRegs->UcodeDataMiddle, ValueRead);
			if (ValueRead != *(Instruction + 1)) {
				DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n",
					  __func__);
				return FALSE;	/* Miscompare */
			}
			READ_REG(HwRegs->UcodeDataHigh, ValueRead);
			if (ValueRead != *(Instruction + 2)) {
				DBG_ERROR("sxg: %s MISCOMPARE HIGH\n",
					  __func__);
				return FALSE;	/* Miscompare */
			}
			/* Advance 3 u32s to start of next instruction */
			Instruction += 3;
		}
	}

	/* Everything OK, Go. */
	WRITE_REG(HwRegs->UcodeAddr, MICROCODE_ADDRESS_GO, FLUSH);

	/*
	 * Poll the CardUp register to wait for microcode to initialize.
	 * Give up after 10,000 attempts (500ms).
	 */
	for (i = 0; i < 10000; i++) {
		udelay(50);
		READ_REG(adapter->UcodeRegs[0].CardUp, ValueRead);
		if (ValueRead == 0xCAFE) {
			DBG_ERROR("sxg: %s BOO YA 0xCAFE\n", __func__);
			break;
		}
	}
	if (i == 10000) {
		DBG_ERROR("sxg: %s TIMEOUT\n", __func__);

		return FALSE;	/* Timeout */
	}
	/*
	 * Now write the LoadSync register.  This is used to
	 * synchronize with the card so it can scribble on the memory
	 * that contained 0xCAFE from the "CardUp" step above
	 */
	if (UcodeSel == SXG_UCODE_SAHARA) {
		WRITE_REG(adapter->UcodeRegs[0].LoadSync, 0, FLUSH);
	}

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDnldUcd",
		  adapter, 0, 0, 0);
	DBG_ERROR("sxg: %s EXIT\n", __func__);

	return (TRUE);
}

/*
 * sxg_allocate_resources - Allocate memory and locks
 *
 * Arguments -
 *	adapter	- A pointer to our adapter structure
 *
 * Return - int
 */
static int sxg_allocate_resources(struct adapter_t *adapter)
{
	int status;
	u32 i;
	u32 RssIds, IsrCount;
	/* struct sxg_xmt_ring *XmtRing; */
	/* struct sxg_rcv_ring *RcvRing; */

	DBG_ERROR("%s ENTER\n", __func__);

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocRes",
		  adapter, 0, 0, 0);

	/* Windows tells us how many CPUs it plans to use for RSS */
	RssIds = SXG_RSS_CPU_COUNT(adapter);
	IsrCount = adapter->MsiEnabled ? RssIds : 1;

	DBG_ERROR("%s Setup the spinlocks\n", __func__);

	/* Allocate spinlocks and initialize listheads first. */
	spin_lock_init(&adapter->RcvQLock);
	spin_lock_init(&adapter->SglQLock);
	spin_lock_init(&adapter->XmtZeroLock);
	spin_lock_init(&adapter->Bit64RegLock);
	spin_lock_init(&adapter->AdapterLock);
	atomic_set(&adapter->pending_allocations, 0);

	DBG_ERROR("%s Setup the lists\n", __func__);

	InitializeListHead(&adapter->FreeRcvBuffers);
	InitializeListHead(&adapter->FreeRcvBlocks);
	InitializeListHead(&adapter->AllRcvBlocks);
	InitializeListHead(&adapter->FreeSglBuffers);
	InitializeListHead(&adapter->AllSglBuffers);

	/*
	 * Mark these basic allocations done.  This flag essentially
	 * tells the SxgFreeResources routine that it can grab spinlocks
	 * and reference listheads.
	 */
	adapter->BasicAllocations = TRUE;
	/*
	 * Main allocation loop.  Start with the maximum supported by
	 * the microcode and back off if memory allocation
	 * fails.  If we hit a minimum, fail.
	 */

	for (;;) {
		DBG_ERROR("%s Allocate XmtRings size[%x]\n", __func__,
			  (unsigned int)(sizeof(struct sxg_xmt_ring) * 1));

		/*
		 * Start with big items first - receive and transmit rings.
		 * At the moment I'm going to keep the ring size fixed and
		 * adjust the TCBs if we fail.  Later we might
		 * consider reducing the ring size as well..
		 */
		adapter->XmtRings = pci_alloc_consistent(adapter->pcidev,
					sizeof(struct sxg_xmt_ring) * 1,
					&adapter->PXmtRings);
		DBG_ERROR("%s XmtRings[%p]\n", __func__, adapter->XmtRings);

		if (!adapter->XmtRings) {
			goto per_tcb_allocation_failed;
		}
		memset(adapter->XmtRings, 0, sizeof(struct sxg_xmt_ring) * 1);

		DBG_ERROR("%s Allocate RcvRings size[%x]\n", __func__,
			  (unsigned int)(sizeof(struct sxg_rcv_ring) * 1));
		adapter->RcvRings =
		    pci_alloc_consistent(adapter->pcidev,
					 sizeof(struct sxg_rcv_ring) * 1,
					 &adapter->PRcvRings);
		DBG_ERROR("%s RcvRings[%p]\n", __func__, adapter->RcvRings);
		if (!adapter->RcvRings) {
			goto per_tcb_allocation_failed;
		}
		memset(adapter->RcvRings, 0, sizeof(struct sxg_rcv_ring) * 1);
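
		/*
		 * Per-adapter microcode statistics block: kzalloc'd and then
		 * DMA-mapped FROMDEVICE so the card can deposit its counters
		 * directly into host memory.
		 */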
		adapter->ucode_stats = kzalloc(sizeof(struct sxg_ucode_stats),
					       GFP_ATOMIC);
		adapter->pucode_stats = pci_map_single(adapter->pcidev,
					adapter->ucode_stats,
					sizeof(struct sxg_ucode_stats),
					PCI_DMA_FROMDEVICE);
		/* memset(adapter->ucode_stats, 0,
		   sizeof(struct sxg_ucode_stats)); */
		break;

 per_tcb_allocation_failed:
		/* an allocation failed.  Free any successful allocations. */
		if (adapter->XmtRings) {
			pci_free_consistent(adapter->pcidev,
					    sizeof(struct sxg_xmt_ring) * 1,
					    adapter->XmtRings,
					    adapter->PXmtRings);
			adapter->XmtRings = NULL;
		}
		if (adapter->RcvRings) {
			pci_free_consistent(adapter->pcidev,
					    sizeof(struct sxg_rcv_ring) * 1,
					    adapter->RcvRings,
					    adapter->PRcvRings);
			adapter->RcvRings = NULL;
		}
		if (adapter->ucode_stats) {
			pci_unmap_single(adapter->pcidev,
					 adapter->pucode_stats,
					 sizeof(struct sxg_ucode_stats),
					 PCI_DMA_FROMDEVICE);
			kfree(adapter->ucode_stats);
			adapter->ucode_stats = NULL;
		}
		/* Loop around and try again.... */

	}

	DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __func__);
	/* Initialize rcv zero and xmt zero rings */
	SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
	SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);

	/* Sanity check receive data structure format */
	/* ASSERT((adapter->ReceiveBufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
	   (adapter->ReceiveBufferSize == SXG_RCV_JUMBO_BUFFER_SIZE)); */
	ASSERT(sizeof(struct sxg_rcv_descriptor_block) ==
	       SXG_RCV_DESCRIPTOR_BLOCK_SIZE);

	/*
	 * Allocate receive data buffers.  We allocate a block of buffers and
	 * a corresponding descriptor block at once.  See sxghw.h:SXG_RCV_BLOCK
	 */
	for (i = 0; i < SXG_INITIAL_RCV_DATA_BUFFERS;
	     i += SXG_RCV_DESCRIPTORS_PER_BLOCK) {
		status = sxg_allocate_buffer_memory(adapter,
				SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
				SXG_BUFFER_TYPE_RCV);
		if (status != STATUS_SUCCESS)
			return status;
	}
	/*
	 * NBL resource allocation can fail in the 'AllocateComplete' routine,
	 * which doesn't return status.  Make sure we got the number of buffers
	 * we requested
	 */
	if (adapter->FreeRcvBufferCount < SXG_INITIAL_RCV_DATA_BUFFERS) {
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
			  adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES,
			  0);
		return (STATUS_RESOURCES);
	}

	DBG_ERROR("%s Allocate EventRings size[%x]\n", __func__,
		  (unsigned int)(sizeof(struct sxg_event_ring) * RssIds));

	/* Allocate event queues. */
	adapter->EventRings = pci_alloc_consistent(adapter->pcidev,
					sizeof(struct sxg_event_ring) * RssIds,
					&adapter->PEventRings);

	if (!adapter->EventRings) {
		/* Caller will call SxgFreeAdapter to clean up above
		 * allocations */
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8",
			  adapter, SXG_MAX_ENTRIES, 0, 0);
		status = STATUS_RESOURCES;
		goto per_tcb_allocation_failed;
	}
	memset(adapter->EventRings, 0, sizeof(struct sxg_event_ring) * RssIds);

	DBG_ERROR("%s Allocate ISR size[%x]\n", __func__, IsrCount);
	/* Allocate ISR */
	adapter->Isr = pci_alloc_consistent(adapter->pcidev,
					    IsrCount * sizeof(u32),
					    &adapter->PIsr);
	if (!adapter->Isr) {
		/* Caller will call SxgFreeAdapter to clean up above
		 * allocations */
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9",
			  adapter, SXG_MAX_ENTRIES, 0, 0);
		status = STATUS_RESOURCES;
		goto per_tcb_allocation_failed;
	}
	memset(adapter->Isr, 0, sizeof(u32) * IsrCount);

	DBG_ERROR("%s Allocate shared XMT ring zero index location size[%x]\n",
		  __func__, (unsigned int)sizeof(u32));

	/* Allocate shared XMT ring zero index location */
	adapter->XmtRingZeroIndex = pci_alloc_consistent(adapter->pcidev,
							 sizeof(u32),
							 &adapter->
							 PXmtRingZeroIndex);
	if (!adapter->XmtRingZeroIndex) {
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF10",
			  adapter, SXG_MAX_ENTRIES, 0, 0);
		status = STATUS_RESOURCES;
		goto per_tcb_allocation_failed;
	}
	memset(adapter->XmtRingZeroIndex, 0, sizeof(u32));

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlcResS",
		  adapter, SXG_MAX_ENTRIES, 0, 0);

	return status;
}

/*
 * sxg_config_pci -
 *
 * Set up PCI Configuration space
 *
 * Arguments -
 *	pcidev		- A pointer to the PCI device structure
 */
static void sxg_config_pci(struct pci_dev *pcidev)
{
	u16 pci_command;
	u16 new_command;

	pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
	DBG_ERROR("sxg: %s  PCI command[%4.4x]\n", __func__, pci_command);
	/* Set the command register */
	new_command = pci_command | (
				     /* Memory Space Enable */
				     PCI_COMMAND_MEMORY |
				     /* Bus master enable */
				     PCI_COMMAND_MASTER |
				     /* Memory write and invalidate */
				     PCI_COMMAND_INVALIDATE |
				     /* Parity error response */
				     PCI_COMMAND_PARITY |
				     /* System ERR */
				     PCI_COMMAND_SERR |
				     /* Fast back-to-back */
				     PCI_COMMAND_FAST_BACK);
	if (pci_command != new_command) {
		DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n",
			  __func__, pci_command, new_command);
		pci_write_config_word(pcidev, PCI_COMMAND, new_command);
	}
}

/*
 * sxg_read_config
 * @adapter : Pointer to the adapter structure for the card
 * This function will read the configuration data from EEPROM/FLASH
 */
static inline int sxg_read_config(struct adapter_t *adapter)
{
	/* struct sxg_config data; */
	struct sw_cfg_data *data;
	dma_addr_t p_addr;
	unsigned long status;
	unsigned long i;

	data = pci_alloc_consistent(adapter->pcidev,
				    sizeof(struct sw_cfg_data), &p_addr);
	if (!data) {
		/*
		 * We can't get even this much memory.  Raise hell and
		 * get out of here.
		 */
		printk(KERN_ERR "%s : Could not allocate memory for reading "
		       "EEPROM\n", __func__);
		return -ENOMEM;
	}

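	/*
	 * Configuration read handshake (as implied by the code below):
	 * seed ConfigStat with SXG_CFG_TIMEOUT, hand the card the DMA
	 * address of the sw_cfg_data buffer via the Config register, then
	 * poll ConfigStat until the microcode overwrites the seed value
	 * with a load status (EEPROM/Flash/error).
	 */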
	WRITE_REG(adapter->UcodeRegs[0].ConfigStat, SXG_CFG_TIMEOUT, TRUE);

	WRITE_REG64(adapter, adapter->UcodeRegs[0].Config, p_addr, 0);
	for (i = 0; i < 1000; i++) {
		READ_REG(adapter->UcodeRegs[0].ConfigStat, status);
		if (status != SXG_CFG_TIMEOUT)
			break;
		mdelay(1);	/* Do we really need this */
	}

	switch (status) {
	/* Config read from EEPROM succeeded */
	case SXG_CFG_LOAD_EEPROM:
	/* Config read from Flash succeeded */
	case SXG_CFG_LOAD_FLASH:
		/*
		 * Copy the MAC address to the adapter structure.
		 * TODO: We are not doing the remaining part: FRU, etc.
		 */
		memcpy(adapter->macaddr, data->MacAddr[0].MacAddr,
		       sizeof(struct sxg_config_mac));
		break;
	case SXG_CFG_TIMEOUT:
	case SXG_CFG_LOAD_INVALID:
	case SXG_CFG_LOAD_ERROR:
	default:	/* Fix default handler later */
		printk(KERN_WARNING "%s : We could not read the config "
		       "word. Status = %ld\n", __func__, status);
		break;
	}
	pci_free_consistent(adapter->pcidev, sizeof(struct sw_cfg_data), data,
			    p_addr);
	if (adapter->netdev) {
		memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
		memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6);
	}
	sxg_dbg_macaddrs(adapter);

	return status;
}

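/*
 * sxg_entry_probe - PCI probe entry point.  Enables the device, sets the
 * DMA mask, maps BAR 0 (hardware registers) and BAR 2 (ucode registers),
 * allocates driver resources, downloads the Sahara microcode, reads the
 * MAC address from EEPROM/Flash and finally registers the net_device.
 */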
static int sxg_entry_probe(struct pci_dev *pcidev,
			   const struct pci_device_id *pci_tbl_entry)
{
	static int did_version = 0;
	int err;
	struct net_device *netdev;
	struct adapter_t *adapter;
	void __iomem *memmapped_ioaddr;
	u32 status = 0;
	ulong mmio_start = 0;
	ulong mmio_len = 0;

	DBG_ERROR("sxg: %s 2.6 VERSION ENTER jiffies[%lx] cpu %d\n",
		  __func__, jiffies, smp_processor_id());

	/* Initialize trace buffer */
#ifdef ATKDBG
	SxgTraceBuffer = &LSxgTraceBuffer;
	SXG_TRACE_INIT(SxgTraceBuffer, TRACE_NOISY);
#endif

	sxg_global.dynamic_intagg = dynamic_intagg;

	err = pci_enable_device(pcidev);

	DBG_ERROR("Call pci_enable_device(%p)  status[%x]\n", pcidev, err);
	if (err) {
		return err;
	}

	if (sxg_debug > 0 && did_version++ == 0) {
		printk(KERN_INFO "%s\n", sxg_banner);
		printk(KERN_INFO "%s\n", SXG_DRV_VERSION);
	}

	if (!(err = pci_set_dma_mask(pcidev, DMA_64BIT_MASK))) {
		DBG_ERROR("pci_set_dma_mask(DMA_64BIT_MASK) successful\n");
	} else {
		if ((err = pci_set_dma_mask(pcidev, DMA_32BIT_MASK))) {
			DBG_ERROR
			    ("No usable DMA configuration, aborting  err[%x]\n",
			     err);
			return err;
		}
		DBG_ERROR("pci_set_dma_mask(DMA_32BIT_MASK) successful\n");
	}

	DBG_ERROR("Call pci_request_regions\n");

	err = pci_request_regions(pcidev, sxg_driver_name);
	if (err) {
		DBG_ERROR("pci_request_regions FAILED err[%x]\n", err);
		return err;
	}

	DBG_ERROR("call pci_set_master\n");
	pci_set_master(pcidev);

	DBG_ERROR("call alloc_etherdev\n");
	netdev = alloc_etherdev(sizeof(struct adapter_t));
	if (!netdev) {
		err = -ENOMEM;
		goto err_out_exit_sxg_probe;
	}
	DBG_ERROR("alloc_etherdev for slic netdev[%p]\n", netdev);

	SET_NETDEV_DEV(netdev, &pcidev->dev);

	pci_set_drvdata(pcidev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pcidev = pcidev;

	mmio_start = pci_resource_start(pcidev, 0);
	mmio_len = pci_resource_len(pcidev, 0);

	DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
		  mmio_start, mmio_len);

	memmapped_ioaddr = ioremap(mmio_start, mmio_len);
	DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
		  memmapped_ioaddr);
	if (!memmapped_ioaddr) {
		DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
			  __func__, mmio_len, mmio_start);
		goto err_out_free_mmio_region_0;
	}

	DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, start[%lx] "
		  "len[%lx], IRQ %d.\n", __func__, memmapped_ioaddr,
		  mmio_start, mmio_len, pcidev->irq);

	adapter->HwRegs = (void *)memmapped_ioaddr;
	adapter->base_addr = memmapped_ioaddr;

	mmio_start = pci_resource_start(pcidev, 2);
	mmio_len = pci_resource_len(pcidev, 2);

	DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
		  mmio_start, mmio_len);

	memmapped_ioaddr = ioremap(mmio_start, mmio_len);
	DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
		  memmapped_ioaddr);
	if (!memmapped_ioaddr) {
		DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
			  __func__, mmio_len, mmio_start);
		goto err_out_free_mmio_region_2;
	}

	DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, "
		  "start[%lx] len[%lx], IRQ %d.\n", __func__,
		  memmapped_ioaddr, mmio_start, mmio_len, pcidev->irq);

	adapter->UcodeRegs = (void *)memmapped_ioaddr;

	adapter->State = SXG_STATE_INITIALIZING;
	/*
	 * Maintain a list of all adapters anchored by
	 * the global SxgDriver structure.
	 */
	adapter->Next = SxgDriver.Adapters;
	SxgDriver.Adapters = adapter;
	adapter->AdapterID = ++SxgDriver.AdapterID;

	/* Initialize CRC table used to determine multicast hash */
	sxg_mcast_init_crc32();

	adapter->JumboEnabled = FALSE;
	adapter->RssEnabled = FALSE;
	if (adapter->JumboEnabled) {
		adapter->FrameSize = JUMBOMAXFRAME;
		adapter->ReceiveBufferSize = SXG_RCV_JUMBO_BUFFER_SIZE;
	} else {
		adapter->FrameSize = ETHERMAXFRAME;
		adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
	}

	/*
	 * status = SXG_READ_EEPROM(adapter);
	 * if (!status) {
	 *	goto sxg_init_bad;
	 * }
	 */

	DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __func__);
	sxg_config_pci(pcidev);
	DBG_ERROR("sxg: %s EXIT sxg_config_pci\n", __func__);

	DBG_ERROR("sxg: %s ENTER sxg_init_driver\n", __func__);
	sxg_init_driver();
	DBG_ERROR("sxg: %s EXIT sxg_init_driver\n", __func__);

	adapter->vendid = pci_tbl_entry->vendor;
	adapter->devid = pci_tbl_entry->device;
	adapter->subsysid = pci_tbl_entry->subdevice;
	adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F);
	adapter->functionnumber = (pcidev->devfn & 0x7);
	adapter->memorylength = pci_resource_len(pcidev, 0);
	adapter->irq = pcidev->irq;
	adapter->next_netdevice = head_netdevice;
	head_netdevice = netdev;
	adapter->port = 0;	/*adapter->functionnumber; */

	/* Allocate memory and other resources */
	DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __func__);
	status = sxg_allocate_resources(adapter);
	DBG_ERROR("sxg: %s EXIT sxg_allocate_resources status %x\n",
		  __func__, status);
	if (status != STATUS_SUCCESS) {
		goto err_out_unmap;
	}

	DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __func__);
	if (sxg_download_microcode(adapter, SXG_UCODE_SAHARA)) {
		DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
			  __func__);
		sxg_read_config(adapter);
		status = sxg_adapter_set_hwaddr(adapter);
	} else {
		adapter->state = ADAPT_FAIL;
		adapter->linkstate = LINK_DOWN;
		DBG_ERROR("sxg_download_microcode FAILED status[%x]\n", status);
	}

	netdev->base_addr = (unsigned long)adapter->base_addr;
	netdev->irq = adapter->irq;
	netdev->open = sxg_entry_open;
	netdev->stop = sxg_entry_halt;
	netdev->hard_start_xmit = sxg_send_packets;
	netdev->do_ioctl = sxg_ioctl;
#if XXXTODO
	netdev->set_mac_address = sxg_mac_set_address;
#endif
	netdev->get_stats = sxg_get_stats;
	netdev->set_multicast_list = sxg_mcast_set_list;
	SET_ETHTOOL_OPS(netdev, &sxg_nic_ethtool_ops);

	strcpy(netdev->name, "eth%d");
	/* strcpy(netdev->name, pci_name(pcidev)); */
	if ((err = register_netdev(netdev))) {
		DBG_ERROR("Cannot register net device, aborting. %s\n",
			  netdev->name);
		goto err_out_unmap;
	}

	netif_napi_add(netdev, &adapter->napi,
		       sxg_poll, SXG_NETDEV_WEIGHT);
	DBG_ERROR
	    ("sxg: %s addr 0x%lx, irq %d, MAC addr "
	     "%02X:%02X:%02X:%02X:%02X:%02X\n",
	     netdev->name, netdev->base_addr, pcidev->irq, netdev->dev_addr[0],
	     netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3],
	     netdev->dev_addr[4], netdev->dev_addr[5]);

	/* sxg_init_bad: */
	ASSERT(status == FALSE);
	/* sxg_free_adapter(adapter); */

	DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __func__,
		  status, jiffies, smp_processor_id());
	return status;

 err_out_unmap:
	sxg_free_resources(adapter);

 err_out_free_mmio_region_2:

	mmio_start = pci_resource_start(pcidev, 2);
	mmio_len = pci_resource_len(pcidev, 2);
	release_mem_region(mmio_start, mmio_len);

 err_out_free_mmio_region_0:

	mmio_start = pci_resource_start(pcidev, 0);
	mmio_len = pci_resource_len(pcidev, 0);

	release_mem_region(mmio_start, mmio_len);

 err_out_exit_sxg_probe:

	DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __func__, jiffies,
		  smp_processor_id());

	pci_disable_device(pcidev);
	DBG_ERROR("sxg: %s deallocate device\n", __func__);
	if (netdev)
		free_netdev(netdev);
	printk("Exit %s, Sxg driver loading failed..\n", __func__);

	return -ENODEV;
}

/*
 * Line-based interrupt routines.
 *
 * sxg_disable_interrupt
 *
 * DisableInterrupt Handler
 *
 * Arguments:
 *
 *   adapter:	Our adapter structure
 *
 * Return Value:
 *	None.
 */
static void sxg_disable_interrupt(struct adapter_t *adapter)
{
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DisIntr",
		  adapter, adapter->InterruptsEnabled, 0, 0);
	/* For now, RSS is disabled with line based interrupts */
	ASSERT(adapter->RssEnabled == FALSE);
	ASSERT(adapter->MsiEnabled == FALSE);
	/* Turn off interrupts by writing to the icr register. */
	WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_DISABLE), TRUE);

	adapter->InterruptsEnabled = 0;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDisIntr",
		  adapter, adapter->InterruptsEnabled, 0, 0);
}

/*
 * sxg_enable_interrupt
 *
 * EnableInterrupt Handler
 *
 * Arguments:
 *
 *   adapter:	Our adapter structure
 *
 * Return Value:
 *	None.
 */
static void sxg_enable_interrupt(struct adapter_t *adapter)
{
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "EnIntr",
		  adapter, adapter->InterruptsEnabled, 0, 0);
	/* For now, RSS is disabled with line based interrupts */
	ASSERT(adapter->RssEnabled == FALSE);
	ASSERT(adapter->MsiEnabled == FALSE);
	/* Turn on interrupts by writing to the icr register. */
	WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE);

	adapter->InterruptsEnabled = 1;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XEnIntr",
		  adapter, 0, 0, 0);
}

/*
 * sxg_isr - Process a line-based interrupt
 *
 * Arguments:
 *	Context		- Our adapter structure
 *	QueueDefault	- Output parameter to queue to default CPU
 *	TargetCpus	- Output bitmap to schedule DPC's
 *
 * Return Value: TRUE if our interrupt
 */
static irqreturn_t sxg_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);

	if (adapter->state != ADAPT_UP)
		return IRQ_NONE;
	adapter->Stats.NumInts++;
	if (adapter->Isr[0] == 0) {
		/*
		 * The SLIC driver used to experience a number of spurious
		 * interrupts due to the delay associated with the masking of
		 * the interrupt (we'd bounce back in here).  If we see that
		 * again with Sahara, add a READ_REG of the Icr register after
		 * the WRITE_REG below.
		 */
		adapter->Stats.FalseInts++;
		return IRQ_NONE;
	}
	/*
	 * Move the Isr contents and clear the value in
	 * shared memory, and mask interrupts
	 */
	/* ASSERT(adapter->IsrDpcsPending == 0); */
#if XXXTODO			/* RSS Stuff */
	/*
	 * If RSS is enabled and the ISR specifies SXG_ISR_EVENT, then
	 * schedule DPC's based on event queues.
	 */
	if (adapter->RssEnabled && (adapter->IsrCopy[0] & SXG_ISR_EVENT)) {
		for (i = 0;
		     i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount;
		     i++) {
			struct sxg_event_ring *EventRing =
			    &adapter->EventRings[i];
			struct sxg_event *Event =
			    &EventRing->Ring[adapter->NextEvent[i]];
			unsigned char Cpu =
			    adapter->RssSystemInfo->RssIdToCpu[i];
			if (Event->Status & EVENT_STATUS_VALID) {
				adapter->IsrDpcsPending++;
				CpuMask |= (1 << Cpu);
			}
		}
	}
	/*
	 * Now, either schedule the CPUs specified by the CpuMask,
	 * or queue default
	 */
	if (CpuMask) {
		*QueueDefault = FALSE;
	} else {
		adapter->IsrDpcsPending = 1;
		*QueueDefault = TRUE;
	}
	*TargetCpus = CpuMask;
#endif
	sxg_interrupt(adapter);

	return IRQ_HANDLED;
}

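/*
 * sxg_interrupt - mask further interrupts via the ICR and hand processing
 * off to NAPI; the interrupt is effectively re-armed from sxg_poll() once
 * the event queue has been drained.
 */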
static void sxg_interrupt(struct adapter_t *adapter)
{
	WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE);

	if (netif_rx_schedule_prep(&adapter->napi)) {
		__netif_rx_schedule(&adapter->napi);
	}
}

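/*
 * sxg_handle_interrupt - NAPI worker.  Snapshots and clears the shared ISR,
 * drains the event queue (bounded by the NAPI budget), then processes the
 * ISR bits and clears the local copy.
 */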
1145static void sxg_handle_interrupt(struct adapter_t *adapter, int *work_done,
1146 int budget)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001147{
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301148 /* unsigned char RssId = 0; */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001149 u32 NewIsr;
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301150 int sxg_napi_continue = 1;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001151 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "HndlIntr",
1152 adapter, adapter->IsrCopy[0], 0, 0);
J.R. Maurob243c4a2008-10-20 19:28:58 -04001153 /* For now, RSS is disabled with line based interrupts */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001154 ASSERT(adapter->RssEnabled == FALSE);
1155 ASSERT(adapter->MsiEnabled == FALSE);
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301156
1157 adapter->IsrCopy[0] = adapter->Isr[0];
1158 adapter->Isr[0] = 0;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001159
J.R. Maurob243c4a2008-10-20 19:28:58 -04001160 /* Always process the event queue. */
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301161 while (sxg_napi_continue)
1162 {
1163 sxg_process_event_queue(adapter,
1164 (adapter->RssEnabled ? /*RssId */ 0 : 0),
1165 &sxg_napi_continue, work_done, budget);
1166 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001167
J.R. Maurob243c4a2008-10-20 19:28:58 -04001168#if XXXTODO /* RSS stuff */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001169 if (--adapter->IsrDpcsPending) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04001170 /* We're done. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001171 ASSERT(adapter->RssEnabled);
1172 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DPCsPend",
1173 adapter, 0, 0, 0);
1174 return;
1175 }
1176#endif
J.R. Maurob243c4a2008-10-20 19:28:58 -04001177 /* Last (or only) DPC processes the ISR and clears the interrupt. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001178 NewIsr = sxg_process_isr(adapter, 0);
J.R. Maurob243c4a2008-10-20 19:28:58 -04001179 /* Reenable interrupts */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001180 adapter->IsrCopy[0] = 0;
1181 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ClearIsr",
1182 adapter, NewIsr, 0, 0);
1183
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001184 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XHndlInt",
1185 adapter, 0, 0, 0);
1186}
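
/*
 * sxg_poll - NAPI poll callback for this adapter.
 *
 * Handles up to 'budget' receive events via sxg_handle_interrupt().  If
 * fewer events than the budget were processed, NAPI polling is completed
 * and the Isr register is cleared so the card may interrupt again.
 */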
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301187static int sxg_poll(struct napi_struct *napi, int budget)
1188{
1189 struct adapter_t *adapter = container_of(napi, struct adapter_t, napi);
1190 int work_done = 0;
1191
1192 sxg_handle_interrupt(adapter, &work_done, budget);
1193
1194 if (work_done < budget) {
1195 netif_rx_complete(napi);
1196 WRITE_REG(adapter->UcodeRegs[0].Isr, 0, TRUE);
1197 }
1198
1199 return work_done;
1200}
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001201
1202/*
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001203 * sxg_process_isr - Process an interrupt. Called from the line-based and
1204 * message based interrupt DPC routines
1205 *
1206 * Arguments:
1207 * adapter - Our adapter structure
 1208 *	MessageId	- Index of the ISR (message) that needs processing
1209 *
1210 * Return Value:
 1211 *	NewIsr - any ISR bits left for further processing
1212 */
J.R. Mauro73b07062008-10-28 18:42:02 -04001213static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001214{
1215 u32 Isr = adapter->IsrCopy[MessageId];
1216 u32 NewIsr = 0;
1217
1218 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ProcIsr",
1219 adapter, Isr, 0, 0);
1220
J.R. Maurob243c4a2008-10-20 19:28:58 -04001221 /* Error */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001222 if (Isr & SXG_ISR_ERR) {
1223 if (Isr & SXG_ISR_PDQF) {
1224 adapter->Stats.PdqFull++;
Harvey Harrisone88bd232008-10-17 14:46:10 -07001225 DBG_ERROR("%s: SXG_ISR_ERR PDQF!!\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001226 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001227 /* No host buffer */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001228 if (Isr & SXG_ISR_RMISS) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301229 /*
1230 * There is a bunch of code in the SLIC driver which
1231 * attempts to process more receive events per DPC
 1232	 * if we start to fall behind. We'll probably
1233 * need to do something similar here, but hold
1234 * off for now. I don't want to make the code more
1235 * complicated than strictly needed.
1236 */
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05301237 adapter->stats.rx_missed_errors++;
Mithlesh Thukral54aed112009-01-19 20:27:17 +05301238			if (adapter->stats.rx_missed_errors < 5) {
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001239 DBG_ERROR("%s: SXG_ISR_ERR RMISS!!\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07001240 __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001241 }
1242 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001243 /* Card crash */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001244 if (Isr & SXG_ISR_DEAD) {
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301245 /*
1246 * Set aside the crash info and set the adapter state
1247 * to RESET
1248 */
1249 adapter->CrashCpu = (unsigned char)
1250 ((Isr & SXG_ISR_CPU) >> SXG_ISR_CPU_SHIFT);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001251 adapter->CrashLocation = (ushort) (Isr & SXG_ISR_CRASH);
1252 adapter->Dead = TRUE;
Harvey Harrisone88bd232008-10-17 14:46:10 -07001253 DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __func__,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001254 adapter->CrashLocation, adapter->CrashCpu);
1255 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001256 /* Event ring full */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001257 if (Isr & SXG_ISR_ERFULL) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301258 /*
1259 * Same issue as RMISS, really. This means the
1260 * host is falling behind the card. Need to increase
1261 * event ring size, process more events per interrupt,
1262 * and/or reduce/remove interrupt aggregation.
1263 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001264 adapter->Stats.EventRingFull++;
1265 DBG_ERROR("%s: SXG_ISR_ERR EVENT RING FULL!!\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07001266 __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001267 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001268 /* Transmit drop - no DRAM buffers or XMT error */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001269 if (Isr & SXG_ISR_XDROP) {
Harvey Harrisone88bd232008-10-17 14:46:10 -07001270 DBG_ERROR("%s: SXG_ISR_ERR XDROP!!\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001271 }
1272 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001273 /* Slowpath send completions */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001274 if (Isr & SXG_ISR_SPSEND) {
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301275 sxg_complete_slow_send(adapter, 1);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001276 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001277 /* Dump */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001278 if (Isr & SXG_ISR_UPC) {
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301279 /* Maybe change when debug is added.. */
Mithlesh Thukral54aed112009-01-19 20:27:17 +05301280		/* ASSERT(adapter->DumpCmdRunning); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001281 adapter->DumpCmdRunning = FALSE;
1282 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001283 /* Link event */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001284 if (Isr & SXG_ISR_LINK) {
1285 sxg_link_event(adapter);
1286 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001287 /* Debug - breakpoint hit */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001288 if (Isr & SXG_ISR_BREAK) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301289 /*
1290 * At the moment AGDB isn't written to support interactive
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301291 * debug sessions. When it is, this interrupt will be used to
1292 * signal AGDB that it has hit a breakpoint. For now, ASSERT.
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301293 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001294 ASSERT(0);
1295 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001296 /* Heartbeat response */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001297 if (Isr & SXG_ISR_PING) {
1298 adapter->PingOutstanding = FALSE;
1299 }
1300 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XProcIsr",
1301 adapter, Isr, NewIsr, 0);
1302
1303 return (NewIsr);
1304}
1305
1306/*
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001307 * sxg_process_event_queue - Process our event queue
1308 *
1309 * Arguments:
1310 * - adapter - Adapter structure
 1311 * - RssId - The event queue requiring processing (plus NAPI state in sxg_napi_continue, work_done and budget)
1312 *
1313 * Return Value:
 1314 *	0, or SXG_ISR_EVENT if the batch limit was hit with events pending.
1315 */
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301316static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId,
1317 int *sxg_napi_continue, int *work_done, int budget)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001318{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301319 struct sxg_event_ring *EventRing = &adapter->EventRings[RssId];
1320 struct sxg_event *Event = &EventRing->Ring[adapter->NextEvent[RssId]];
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001321 u32 EventsProcessed = 0, Batches = 0;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001322 struct sk_buff *skb;
1323#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1324 struct sk_buff *prev_skb = NULL;
1325 struct sk_buff *IndicationList[SXG_RCV_ARRAYSIZE];
1326 u32 Index;
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301327 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001328#endif
1329 u32 ReturnStatus = 0;
1330
1331 ASSERT((adapter->State == SXG_STATE_RUNNING) ||
1332 (adapter->State == SXG_STATE_PAUSING) ||
1333 (adapter->State == SXG_STATE_PAUSED) ||
1334 (adapter->State == SXG_STATE_HALTING));
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301335 /*
1336 * We may still have unprocessed events on the queue if
1337 * the card crashed. Don't process them.
1338 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001339 if (adapter->Dead) {
1340 return (0);
1341 }
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301342 /*
1343 * In theory there should only be a single processor that
 1344	 * accesses this queue, and only at interrupt-DPC time. So
1345 * we shouldn't need a lock for any of this.
1346 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001347 while (Event->Status & EVENT_STATUS_VALID) {
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301348 (*sxg_napi_continue) = 1;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001349 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "Event",
1350 Event, Event->Code, Event->Status,
1351 adapter->NextEvent);
1352 switch (Event->Code) {
1353 case EVENT_CODE_BUFFERS:
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301354 /* struct sxg_ring_info Head & Tail == unsigned char */
1355 ASSERT(!(Event->CommandIndex & 0xFF00));
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001356 sxg_complete_descriptor_blocks(adapter,
1357 Event->CommandIndex);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001358 break;
1359 case EVENT_CODE_SLOWRCV:
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301360 (*work_done)++;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001361 --adapter->RcvBuffersOnCard;
1362 if ((skb = sxg_slow_receive(adapter, Event))) {
1363 u32 rx_bytes;
1364#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
J.R. Maurob243c4a2008-10-20 19:28:58 -04001365 /* Add it to our indication list */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001366 SXG_ADD_RCV_PACKET(adapter, skb, prev_skb,
1367 IndicationList, num_skbs);
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301368 /*
1369 * Linux, we just pass up each skb to the
1370 * protocol above at this point, there is no
1371 * capability of an indication list.
1372 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001373#else
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301374 /* CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); */
1375 /* (rcvbuf->length & IRHDDR_FLEN_MSK); */
1376 rx_bytes = Event->Length;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001377 adapter->stats.rx_packets++;
1378 adapter->stats.rx_bytes += rx_bytes;
1379#if SXG_OFFLOAD_IP_CHECKSUM
1380 skb->ip_summed = CHECKSUM_UNNECESSARY;
1381#endif
1382 skb->dev = adapter->netdev;
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301383 netif_receive_skb(skb);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001384#endif
1385 }
1386 break;
1387 default:
1388 DBG_ERROR("%s: ERROR Invalid EventCode %d\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07001389 __func__, Event->Code);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301390 /* ASSERT(0); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001391 }
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301392 /*
1393 * See if we need to restock card receive buffers.
1394 * There are two things to note here:
1395 * First - This test is not SMP safe. The
1396 * adapter->BuffersOnCard field is protected via atomic
1397 * interlocked calls, but we do not protect it with respect
1398 * to these tests. The only way to do that is with a lock,
1399 * and I don't want to grab a lock every time we adjust the
1400 * BuffersOnCard count. Instead, we allow the buffer
1401 * replenishment to be off once in a while. The worst that
 1402	 * can happen is the card is given one more or one less descriptor
 1403	 * block than the arbitrary value we've chosen. No big deal.
 1404	 * In short, DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard
1405 * is adjusted.
1406 * Second - We expect this test to rarely
1407 * evaluate to true. We attempt to refill descriptor blocks
1408 * as they are returned to us (sxg_complete_descriptor_blocks)
 1409	 * so the only time this should evaluate to true is when
1410 * sxg_complete_descriptor_blocks failed to allocate
1411 * receive buffers.
1412 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001413 if (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
1414 sxg_stock_rcv_buffers(adapter);
1415 }
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301416 /*
1417 * It's more efficient to just set this to zero.
1418 * But clearing the top bit saves potential debug info...
1419 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001420 Event->Status &= ~EVENT_STATUS_VALID;
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301421 /* Advance to the next event */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001422 SXG_ADVANCE_INDEX(adapter->NextEvent[RssId], EVENT_RING_SIZE);
1423 Event = &EventRing->Ring[adapter->NextEvent[RssId]];
1424 EventsProcessed++;
1425 if (EventsProcessed == EVENT_RING_BATCH) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04001426 /* Release a batch of events back to the card */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001427 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1428 EVENT_RING_BATCH, FALSE);
1429 EventsProcessed = 0;
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301430 /*
1431 * If we've processed our batch limit, break out of the
1432 * loop and return SXG_ISR_EVENT to arrange for us to
1433 * be called again
1434 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001435 if (Batches++ == EVENT_BATCH_LIMIT) {
1436 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1437 TRACE_NOISY, "EvtLimit", Batches,
1438 adapter->NextEvent, 0, 0);
1439 ReturnStatus = SXG_ISR_EVENT;
1440 break;
1441 }
1442 }
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301443 if (*work_done >= budget) {
1444 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1445 EventsProcessed, FALSE);
1446 EventsProcessed = 0;
1447 (*sxg_napi_continue) = 0;
1448 break;
1449 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001450 }
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301451 if (!(Event->Status & EVENT_STATUS_VALID))
1452 (*sxg_napi_continue) = 0;
1453
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001454#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
J.R. Maurob243c4a2008-10-20 19:28:58 -04001455 /* Indicate any received dumb-nic frames */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001456 SXG_INDICATE_PACKETS(adapter, IndicationList, num_skbs);
1457#endif
J.R. Maurob243c4a2008-10-20 19:28:58 -04001458 /* Release events back to the card. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001459 if (EventsProcessed) {
1460 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1461 EventsProcessed, FALSE);
1462 }
1463 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XPrcEvnt",
1464 Batches, EventsProcessed, adapter->NextEvent, num_skbs);
1465
1466 return (ReturnStatus);
1467}
1468
1469/*
1470 * sxg_complete_slow_send - Complete slowpath or dumb-nic sends
1471 *
1472 * Arguments -
1473 * adapter - A pointer to our adapter structure
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301474 * irq_context - An integer to denote if we are in interrupt context
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001475 * Return
1476 * None
1477 */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301478static void sxg_complete_slow_send(struct adapter_t *adapter, int irq_context)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001479{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301480 struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
1481 struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
J.R. Mauro5c7514e2008-10-05 20:38:52 -04001482 u32 *ContextType;
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301483 struct sxg_cmd *XmtCmd;
Mithlesh Thukral54aed112009-01-19 20:27:17 +05301484 unsigned long flags = 0;
1485 unsigned long sgl_flags = 0;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301486 unsigned int processed_count = 0;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001487
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301488 /*
1489 * NOTE - This lock is dropped and regrabbed in this loop.
 1490	 * This means two different processors can both be running
1491 * through this loop. Be *very* careful.
1492 */
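	/*
	 * In interrupt context only a trylock of XmtZeroLock is attempted;
	 * if the lock is busy we bail out via lock_busy instead of spinning
	 * in the ISR path.  Any completions skipped here are presumably
	 * reaped by the current lock holder or on a later pass.
	 */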
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301493	if (irq_context) {
 1494		if (!spin_trylock(&adapter->XmtZeroLock))
1495 goto lock_busy;
1496 }
1497 else
1498 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
1499
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001500 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds",
1501 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1502
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301503 while ((XmtRingInfo->Tail != *adapter->XmtRingZeroIndex)
1504 && processed_count++ < SXG_COMPLETE_SLOW_SEND_LIMIT) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301505 /*
1506 * Locate the current Cmd (ring descriptor entry), and
1507 * associated SGL, and advance the tail
1508 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001509 SXG_RETURN_CMD(XmtRing, XmtRingInfo, XmtCmd, ContextType);
1510 ASSERT(ContextType);
1511 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1512 XmtRingInfo->Head, XmtRingInfo->Tail, XmtCmd, 0);
J.R. Maurob243c4a2008-10-20 19:28:58 -04001513 /* Clear the SGL field. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001514 XmtCmd->Sgl = 0;
1515
1516 switch (*ContextType) {
1517 case SXG_SGL_DUMB:
1518 {
1519 struct sk_buff *skb;
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301520 struct sxg_scatter_gather *SxgSgl =
1521 (struct sxg_scatter_gather *)ContextType;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301522 dma64_addr_t FirstSgeAddress;
1523 u32 FirstSgeLength;
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05301524
J.R. Maurob243c4a2008-10-20 19:28:58 -04001525 /* Dumb-nic send. Command context is the dumb-nic SGL */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001526 skb = (struct sk_buff *)ContextType;
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05301527 skb = SxgSgl->DumbPacket;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301528 FirstSgeAddress = XmtCmd->Buffer.FirstSgeAddress;
1529 FirstSgeLength = XmtCmd->Buffer.FirstSgeLength;
J.R. Maurob243c4a2008-10-20 19:28:58 -04001530 /* Complete the send */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001531 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1532 TRACE_IMPORTANT, "DmSndCmp", skb, 0,
1533 0, 0);
1534 ASSERT(adapter->Stats.XmtQLen);
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301535 /*
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301536 * Now drop the lock and complete the send
1537 * back to Microsoft. We need to drop the lock
1538 * because Microsoft can come back with a
1539 * chimney send, which results in a double trip
 1540	 * in SxgTcpOutput
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301541 */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301542			if (irq_context)
1543 spin_unlock(&adapter->XmtZeroLock);
1544 else
1545 spin_unlock_irqrestore(
1546 &adapter->XmtZeroLock, flags);
1547
1548 SxgSgl->DumbPacket = NULL;
1549 SXG_COMPLETE_DUMB_SEND(adapter, skb,
1550 FirstSgeAddress,
1551 FirstSgeLength);
1552 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL,
1553 irq_context);
J.R. Maurob243c4a2008-10-20 19:28:58 -04001554 /* and reacquire.. */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301555			if (irq_context) {
 1556				if (!spin_trylock(&adapter->XmtZeroLock))
1557 goto lock_busy;
1558 }
1559 else
1560 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001561 }
1562 break;
1563 default:
1564 ASSERT(0);
1565 }
1566 }
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301567	if (irq_context)
1568 spin_unlock(&adapter->XmtZeroLock);
1569 else
1570 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
1571lock_busy:
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001572 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1573 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1574}
1575
1576/*
1577 * sxg_slow_receive
1578 *
1579 * Arguments -
1580 * adapter - A pointer to our adapter structure
1581 * Event - Receive event
1582 *
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301583 * Return - skb
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001584 */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301585static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
1586 struct sxg_event *Event)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001587{
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05301588 u32 BufferSize = adapter->ReceiveBufferSize;
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301589 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001590 struct sk_buff *Packet;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301591 static int read_counter = 0;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001592
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301593 RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *) Event->HostHandle;
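	/*
	 * Sample the card statistics roughly once every 256 received
	 * events (whenever bit 8 of the counter becomes set) rather than
	 * on every packet, then restart the count.
	 */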
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301594	if (read_counter++ & 0x100)
1595 {
1596 sxg_collect_statistics(adapter);
1597 read_counter = 0;
1598 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001599 ASSERT(RcvDataBufferHdr);
1600 ASSERT(RcvDataBufferHdr->State == SXG_BUFFER_ONCARD);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001601 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event,
1602 RcvDataBufferHdr, RcvDataBufferHdr->State,
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05301603 /*RcvDataBufferHdr->VirtualAddress*/ 0);
J.R. Maurob243c4a2008-10-20 19:28:58 -04001604 /* Drop rcv frames in non-running state */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001605 switch (adapter->State) {
1606 case SXG_STATE_RUNNING:
1607 break;
1608 case SXG_STATE_PAUSING:
1609 case SXG_STATE_PAUSED:
1610 case SXG_STATE_HALTING:
1611 goto drop;
1612 default:
1613 ASSERT(0);
1614 goto drop;
1615 }
1616
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301617 /*
1618 * memcpy(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1619 * RcvDataBufferHdr->VirtualAddress, Event->Length);
1620 */
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05301621
J.R. Maurob243c4a2008-10-20 19:28:58 -04001622 /* Change buffer state to UPSTREAM */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001623 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
1624 if (Event->Status & EVENT_STATUS_RCVERR) {
1625 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvError",
1626 Event, Event->Status, Event->HostHandle, 0);
J.R. Maurob243c4a2008-10-20 19:28:58 -04001627 /* XXXTODO - Remove this print later */
J.R. Mauro5c7514e2008-10-05 20:38:52 -04001628 DBG_ERROR("SXG: Receive error %x\n", *(u32 *)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001629 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr));
J.R. Mauro5c7514e2008-10-05 20:38:52 -04001630 sxg_process_rcv_error(adapter, *(u32 *)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001631 SXG_RECEIVE_DATA_LOCATION
1632 (RcvDataBufferHdr));
1633 goto drop;
1634 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001635#if XXXTODO /* VLAN stuff */
1636 /* If there's a VLAN tag, extract it and validate it */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301637 if (((struct ether_header *)
1638 (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))->EtherType
1639 == ETHERTYPE_VLAN) {
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001640 if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) !=
1641 STATUS_SUCCESS) {
1642 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY,
1643 "BadVlan", Event,
1644 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1645 Event->Length, 0);
1646 goto drop;
1647 }
1648 }
1649#endif
J.R. Maurob243c4a2008-10-20 19:28:58 -04001650 /* Dumb-nic frame. See if it passes our mac filter and update stats */
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301651
Mithlesh Thukralb040b072009-01-28 07:08:11 +05301652 if (!sxg_mac_filter(adapter,
1653 (struct ether_header *)(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)),
1654 Event->Length)) {
1655 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvFiltr",
1656 Event, SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1657 Event->Length, 0);
1658 goto drop;
1659 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001660
1661 Packet = RcvDataBufferHdr->SxgDumbRcvPacket;
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05301662 SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event);
1663 Packet->protocol = eth_type_trans(Packet, adapter->netdev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001664
1665 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv",
1666 RcvDataBufferHdr, Packet, Event->Length, 0);
J.R. Maurob243c4a2008-10-20 19:28:58 -04001667	/* Detach the skb from this buffer header and replenish the buffer */
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05301668 RcvDataBufferHdr->SxgDumbRcvPacket = NULL;
Mithlesh Thukral54aed112009-01-19 20:27:17 +05301669 RcvDataBufferHdr->PhysicalAddress = (dma_addr_t)NULL;
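	/*
	 * Attach a fresh skb to this descriptor header and, if that
	 * allocation succeeded, return the header to the receive free list
	 * under RcvQLock so it can be reposted to the card (behaviour
	 * inferred from the SXG_ALLOCATE_RCV_PACKET and
	 * SXG_FREE_RCV_DATA_BUFFER macro names).
	 */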
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05301670 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize);
1671 if (RcvDataBufferHdr->skb)
1672 {
1673 spin_lock(&adapter->RcvQLock);
1674 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301675		/* adapter->RcvBuffersOnCard++; */
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05301676 spin_unlock(&adapter->RcvQLock);
1677 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001678 return (Packet);
1679
1680 drop:
1681 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DropRcv",
1682 RcvDataBufferHdr, Event->Length, 0, 0);
Mithlesh Thukral54aed112009-01-19 20:27:17 +05301683 adapter->stats.rx_dropped++;
 1684	/* adapter->Stats.RcvDiscards++; */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001685 spin_lock(&adapter->RcvQLock);
1686 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
1687 spin_unlock(&adapter->RcvQLock);
1688 return (NULL);
1689}
1690
1691/*
1692 * sxg_process_rcv_error - process receive error and update
1693 * stats
1694 *
1695 * Arguments:
1696 * adapter - Adapter structure
1697 * ErrorStatus - 4-byte receive error status
1698 *
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301699 * Return Value : None
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001700 */
J.R. Mauro73b07062008-10-28 18:42:02 -04001701static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001702{
1703 u32 Error;
1704
Mithlesh Thukral54aed112009-01-19 20:27:17 +05301705 adapter->stats.rx_errors++;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001706
1707 if (ErrorStatus & SXG_RCV_STATUS_TRANSPORT_ERROR) {
1708 Error = ErrorStatus & SXG_RCV_STATUS_TRANSPORT_MASK;
1709 switch (Error) {
1710 case SXG_RCV_STATUS_TRANSPORT_CSUM:
1711 adapter->Stats.TransportCsum++;
1712 break;
1713 case SXG_RCV_STATUS_TRANSPORT_UFLOW:
1714 adapter->Stats.TransportUflow++;
1715 break;
1716 case SXG_RCV_STATUS_TRANSPORT_HDRLEN:
1717 adapter->Stats.TransportHdrLen++;
1718 break;
1719 }
1720 }
1721 if (ErrorStatus & SXG_RCV_STATUS_NETWORK_ERROR) {
1722 Error = ErrorStatus & SXG_RCV_STATUS_NETWORK_MASK;
1723 switch (Error) {
1724 case SXG_RCV_STATUS_NETWORK_CSUM:
1725 adapter->Stats.NetworkCsum++;
1726 break;
1727 case SXG_RCV_STATUS_NETWORK_UFLOW:
1728 adapter->Stats.NetworkUflow++;
1729 break;
1730 case SXG_RCV_STATUS_NETWORK_HDRLEN:
1731 adapter->Stats.NetworkHdrLen++;
1732 break;
1733 }
1734 }
1735 if (ErrorStatus & SXG_RCV_STATUS_PARITY) {
1736 adapter->Stats.Parity++;
1737 }
1738 if (ErrorStatus & SXG_RCV_STATUS_LINK_ERROR) {
1739 Error = ErrorStatus & SXG_RCV_STATUS_LINK_MASK;
1740 switch (Error) {
1741 case SXG_RCV_STATUS_LINK_PARITY:
1742 adapter->Stats.LinkParity++;
1743 break;
1744 case SXG_RCV_STATUS_LINK_EARLY:
1745 adapter->Stats.LinkEarly++;
1746 break;
1747 case SXG_RCV_STATUS_LINK_BUFOFLOW:
1748 adapter->Stats.LinkBufOflow++;
1749 break;
1750 case SXG_RCV_STATUS_LINK_CODE:
1751 adapter->Stats.LinkCode++;
1752 break;
1753 case SXG_RCV_STATUS_LINK_DRIBBLE:
1754 adapter->Stats.LinkDribble++;
1755 break;
1756 case SXG_RCV_STATUS_LINK_CRC:
1757 adapter->Stats.LinkCrc++;
1758 break;
1759 case SXG_RCV_STATUS_LINK_OFLOW:
1760 adapter->Stats.LinkOflow++;
1761 break;
1762 case SXG_RCV_STATUS_LINK_UFLOW:
1763 adapter->Stats.LinkUflow++;
1764 break;
1765 }
1766 }
1767}
1768
1769/*
1770 * sxg_mac_filter
1771 *
1772 * Arguments:
1773 * adapter - Adapter structure
1774 * pether - Ethernet header
1775 * length - Frame length
1776 *
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301777 * Return Value : TRUE if the frame is to be allowed
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001778 */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301779static bool sxg_mac_filter(struct adapter_t *adapter,
1780 struct ether_header *EtherHdr, ushort length)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001781{
1782 bool EqualAddr;
Mithlesh Thukralb040b072009-01-28 07:08:11 +05301783 struct net_device *dev = adapter->netdev;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001784
1785 if (SXG_MULTICAST_PACKET(EtherHdr)) {
1786 if (SXG_BROADCAST_PACKET(EtherHdr)) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04001787 /* broadcast */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001788 if (adapter->MacFilter & MAC_BCAST) {
1789 adapter->Stats.DumbRcvBcastPkts++;
1790 adapter->Stats.DumbRcvBcastBytes += length;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001791 return (TRUE);
1792 }
1793 } else {
J.R. Maurob243c4a2008-10-20 19:28:58 -04001794 /* multicast */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001795 if (adapter->MacFilter & MAC_ALLMCAST) {
1796 adapter->Stats.DumbRcvMcastPkts++;
1797 adapter->Stats.DumbRcvMcastBytes += length;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001798 return (TRUE);
1799 }
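			/*
			 * Selective multicast: walk the device multicast
			 * list and accept the frame only if its destination
			 * matches one of the subscribed addresses.
			 */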
1800 if (adapter->MacFilter & MAC_MCAST) {
Mithlesh Thukralb040b072009-01-28 07:08:11 +05301801 struct dev_mc_list *mclist = dev->mc_list;
1802 while (mclist) {
1803 ETHER_EQ_ADDR(mclist->da_addr,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001804 EtherHdr->ether_dhost,
1805 EqualAddr);
1806 if (EqualAddr) {
1807 adapter->Stats.
1808 DumbRcvMcastPkts++;
1809 adapter->Stats.
1810 DumbRcvMcastBytes += length;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001811 return (TRUE);
1812 }
Mithlesh Thukralb040b072009-01-28 07:08:11 +05301813 mclist = mclist->next;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001814 }
1815 }
1816 }
1817 } else if (adapter->MacFilter & MAC_DIRECTED) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301818 /*
1819 * Not broadcast or multicast. Must be directed at us or
1820 * the card is in promiscuous mode. Either way, consider it
1821 * ours if MAC_DIRECTED is set
1822 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001823 adapter->Stats.DumbRcvUcastPkts++;
1824 adapter->Stats.DumbRcvUcastBytes += length;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001825 return (TRUE);
1826 }
1827 if (adapter->MacFilter & MAC_PROMISC) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04001828 /* Whatever it is, keep it. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001829 return (TRUE);
1830 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001831 return (FALSE);
1832}
Mithlesh Thukralb040b072009-01-28 07:08:11 +05301833
J.R. Mauro73b07062008-10-28 18:42:02 -04001834static int sxg_register_interrupt(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001835{
1836 if (!adapter->intrregistered) {
1837 int retval;
1838
1839 DBG_ERROR
1840 ("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x] %x\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07001841 __func__, adapter, adapter->netdev->irq, NR_IRQS);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001842
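		/*
		 * Drop the global driver lock here, presumably because
		 * request_irq() can sleep and must not be called with a
		 * spinlock held; it is re-taken right after registration.
		 */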
J.R. Mauro5c7514e2008-10-05 20:38:52 -04001843 spin_unlock_irqrestore(&sxg_global.driver_lock,
1844 sxg_global.flags);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001845
1846 retval = request_irq(adapter->netdev->irq,
1847 &sxg_isr,
1848 IRQF_SHARED,
1849 adapter->netdev->name, adapter->netdev);
1850
1851 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
1852
1853 if (retval) {
1854 DBG_ERROR("sxg: request_irq (%s) FAILED [%x]\n",
1855 adapter->netdev->name, retval);
1856 return (retval);
1857 }
1858 adapter->intrregistered = 1;
1859 adapter->IntRegistered = TRUE;
J.R. Maurob243c4a2008-10-20 19:28:58 -04001860 /* Disable RSS with line-based interrupts */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001861 adapter->MsiEnabled = FALSE;
1862 adapter->RssEnabled = FALSE;
1863 DBG_ERROR("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x]\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07001864 __func__, adapter, adapter->netdev->irq);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001865 }
1866 return (STATUS_SUCCESS);
1867}
1868
J.R. Mauro73b07062008-10-28 18:42:02 -04001869static void sxg_deregister_interrupt(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001870{
Harvey Harrisone88bd232008-10-17 14:46:10 -07001871 DBG_ERROR("sxg: %s ENTER adapter[%p]\n", __func__, adapter);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001872#if XXXTODO
1873 slic_init_cleanup(adapter);
1874#endif
1875 memset(&adapter->stats, 0, sizeof(struct net_device_stats));
1876 adapter->error_interrupts = 0;
1877 adapter->rcv_interrupts = 0;
1878 adapter->xmit_interrupts = 0;
1879 adapter->linkevent_interrupts = 0;
1880 adapter->upr_interrupts = 0;
1881 adapter->num_isrs = 0;
1882 adapter->xmit_completes = 0;
1883 adapter->rcv_broadcasts = 0;
1884 adapter->rcv_multicasts = 0;
1885 adapter->rcv_unicasts = 0;
Harvey Harrisone88bd232008-10-17 14:46:10 -07001886 DBG_ERROR("sxg: %s EXIT\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001887}
1888
1889/*
1890 * sxg_if_init
1891 *
1892 * Perform initialization of our slic interface.
1893 *
1894 */
J.R. Mauro73b07062008-10-28 18:42:02 -04001895static int sxg_if_init(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001896{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301897 struct net_device *dev = adapter->netdev;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001898 int status = 0;
1899
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05301900 DBG_ERROR("sxg: %s (%s) ENTER states[%d:%d] flags[%x]\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07001901 __func__, adapter->netdev->name,
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05301902 adapter->state,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001903 adapter->linkstate, dev->flags);
1904
1905 /* adapter should be down at this point */
1906 if (adapter->state != ADAPT_DOWN) {
1907 DBG_ERROR("sxg_if_init adapter->state != ADAPT_DOWN\n");
1908 return (-EIO);
1909 }
1910 ASSERT(adapter->linkstate == LINK_DOWN);
1911
1912 adapter->devflags_prev = dev->flags;
Mithlesh Thukralb040b072009-01-28 07:08:11 +05301913 adapter->MacFilter = MAC_DIRECTED;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001914 if (dev->flags) {
Harvey Harrisone88bd232008-10-17 14:46:10 -07001915 DBG_ERROR("sxg: %s (%s) Set MAC options: ", __func__,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001916 adapter->netdev->name);
1917 if (dev->flags & IFF_BROADCAST) {
Mithlesh Thukralb040b072009-01-28 07:08:11 +05301918 adapter->MacFilter |= MAC_BCAST;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001919 DBG_ERROR("BCAST ");
1920 }
1921 if (dev->flags & IFF_PROMISC) {
Mithlesh Thukralb040b072009-01-28 07:08:11 +05301922 adapter->MacFilter |= MAC_PROMISC;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001923 DBG_ERROR("PROMISC ");
1924 }
1925 if (dev->flags & IFF_ALLMULTI) {
Mithlesh Thukralb040b072009-01-28 07:08:11 +05301926 adapter->MacFilter |= MAC_ALLMCAST;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001927 DBG_ERROR("ALL_MCAST ");
1928 }
1929 if (dev->flags & IFF_MULTICAST) {
Mithlesh Thukralb040b072009-01-28 07:08:11 +05301930 adapter->MacFilter |= MAC_MCAST;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001931 DBG_ERROR("MCAST ");
1932 }
1933 DBG_ERROR("\n");
1934 }
1935 status = sxg_register_interrupt(adapter);
1936 if (status != STATUS_SUCCESS) {
1937 DBG_ERROR("sxg_if_init: sxg_register_interrupt FAILED %x\n",
1938 status);
1939 sxg_deregister_interrupt(adapter);
1940 return (status);
1941 }
1942
1943 adapter->state = ADAPT_UP;
1944
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301945 /* clear any pending events, then enable interrupts */
Harvey Harrisone88bd232008-10-17 14:46:10 -07001946 DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001947
1948 return (STATUS_SUCCESS);
1949}
1950
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301951void sxg_set_interrupt_aggregation(struct adapter_t *adapter)
1952{
1953 /*
1954 * Top bit disables aggregation on xmt (SXG_AGG_XMT_DISABLE).
1955 * Make sure Max is less than 0x8000.
1956 */
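	/*
	 * The value written below is (max << SXG_MAX_AGG_SHIFT) | min.
	 * Illustration only (the real defaults come from the headers):
	 * with a hypothetical shift of 16, max 0x0200 and min 0x0010, the
	 * register would be programmed with 0x02000010.
	 */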
1957 adapter->max_aggregation = SXG_MAX_AGG_DEFAULT;
1958 adapter->min_aggregation = SXG_MIN_AGG_DEFAULT;
1959 WRITE_REG(adapter->UcodeRegs[0].Aggregation,
1960 ((adapter->max_aggregation << SXG_MAX_AGG_SHIFT) |
1961 adapter->min_aggregation),
1962 TRUE);
1963}
1964
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301965static int sxg_entry_open(struct net_device *dev)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001966{
J.R. Mauro73b07062008-10-28 18:42:02 -04001967 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001968 int status;
Mithlesh Thukral0d414722009-01-19 20:29:59 +05301969 static int turn;
1970
1971 if (turn) {
1972 sxg_second_open(adapter->netdev);
1973
1974 return STATUS_SUCCESS;
1975 }
1976
1977 turn++;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001978
1979 ASSERT(adapter);
Harvey Harrisone88bd232008-10-17 14:46:10 -07001980 DBG_ERROR("sxg: %s adapter->activated[%d]\n", __func__,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001981 adapter->activated);
1982 DBG_ERROR
1983 ("sxg: %s (%s): [jiffies[%lx] cpu %d] dev[%p] adapt[%p] port[%d]\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07001984 __func__, adapter->netdev->name, jiffies, smp_processor_id(),
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001985 adapter->netdev, adapter, adapter->port);
1986
1987 netif_stop_queue(adapter->netdev);
1988
1989 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
1990 if (!adapter->activated) {
1991 sxg_global.num_sxg_ports_active++;
1992 adapter->activated = 1;
1993 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001994 /* Initialize the adapter */
Harvey Harrisone88bd232008-10-17 14:46:10 -07001995 DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001996 status = sxg_initialize_adapter(adapter);
1997 DBG_ERROR("sxg: %s EXIT sxg_initialize_adapter status[%x]\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07001998 __func__, status);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001999
2000 if (status == STATUS_SUCCESS) {
Harvey Harrisone88bd232008-10-17 14:46:10 -07002001 DBG_ERROR("sxg: %s ENTER sxg_if_init\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002002 status = sxg_if_init(adapter);
Harvey Harrisone88bd232008-10-17 14:46:10 -07002003 DBG_ERROR("sxg: %s EXIT sxg_if_init status[%x]\n", __func__,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002004 status);
2005 }
2006
2007 if (status != STATUS_SUCCESS) {
2008 if (adapter->activated) {
2009 sxg_global.num_sxg_ports_active--;
2010 adapter->activated = 0;
2011 }
2012 spin_unlock_irqrestore(&sxg_global.driver_lock,
2013 sxg_global.flags);
2014 return (status);
2015 }
Harvey Harrisone88bd232008-10-17 14:46:10 -07002016 DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __func__);
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05302017 sxg_set_interrupt_aggregation(adapter);
2018 napi_enable(&adapter->napi);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002019
J.R. Maurob243c4a2008-10-20 19:28:58 -04002020 /* Enable interrupts */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002021 SXG_ENABLE_ALL_INTERRUPTS(adapter);
2022
Harvey Harrisone88bd232008-10-17 14:46:10 -07002023 DBG_ERROR("sxg: %s EXIT\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002024
2025 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
2026 return STATUS_SUCCESS;
2027}
2028
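/*
 * sxg_second_open - Lighter-weight open used after the first open.
 *
 * sxg_entry_open() performs the full first-time initialization and routes
 * every subsequent open here (tracked by its static 'turn' flag); this
 * path re-initializes the adapter, reprograms interrupt aggregation and
 * re-enables interrupts.
 */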
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302029 int sxg_second_open(struct net_device *dev)
2030{
2031 struct adapter_t *adapter = (struct adapter_t*) netdev_priv(dev);
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05302032 int status = 0;
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302033
2034 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
2035 netif_start_queue(adapter->netdev);
2036 adapter->state = ADAPT_UP;
2037 adapter->linkstate = LINK_UP;
2038
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05302039 status = sxg_initialize_adapter(adapter);
2040 sxg_set_interrupt_aggregation(adapter);
2041 napi_enable(&adapter->napi);
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302042 /* Re-enable interrupts */
2043 SXG_ENABLE_ALL_INTERRUPTS(adapter);
2044
2045 netif_carrier_on(dev);
2046 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
2047 sxg_register_interrupt(adapter);
2048 return (STATUS_SUCCESS);
2049
2050}
2051
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002052static void __devexit sxg_entry_remove(struct pci_dev *pcidev)
2053{
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302054 u32 mmio_start = 0;
2055 u32 mmio_len = 0;
2056
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302057 struct net_device *dev = pci_get_drvdata(pcidev);
J.R. Mauro73b07062008-10-28 18:42:02 -04002058 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05302059
2060 flush_scheduled_work();
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05302061
2062 /* Deallocate Resources */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302063 unregister_netdev(dev);
2064 sxg_free_resources(adapter);
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05302065
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002066 ASSERT(adapter);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002067
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302068 mmio_start = pci_resource_start(pcidev, 0);
2069 mmio_len = pci_resource_len(pcidev, 0);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002070
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302071	DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __func__,
2072 mmio_start, mmio_len);
2073 release_mem_region(mmio_start, mmio_len);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002074
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05302075 mmio_start = pci_resource_start(pcidev, 2);
2076 mmio_len = pci_resource_len(pcidev, 2);
2077
 2078	DBG_ERROR("sxg: %s rel_region(2) start[%x] len[%x]\n", __func__,
2079 mmio_start, mmio_len);
2080 release_mem_region(mmio_start, mmio_len);
2081
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05302082 pci_disable_device(pcidev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002083
Harvey Harrisone88bd232008-10-17 14:46:10 -07002084 DBG_ERROR("sxg: %s deallocate device\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002085 kfree(dev);
Harvey Harrisone88bd232008-10-17 14:46:10 -07002086 DBG_ERROR("sxg: %s EXIT\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002087}
2088
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302089static int sxg_entry_halt(struct net_device *dev)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002090{
J.R. Mauro73b07062008-10-28 18:42:02 -04002091 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002092
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05302093 napi_disable(&adapter->napi);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002094 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
Harvey Harrisone88bd232008-10-17 14:46:10 -07002095 DBG_ERROR("sxg: %s (%s) ENTER\n", __func__, dev->name);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002096
2097 netif_stop_queue(adapter->netdev);
2098 adapter->state = ADAPT_DOWN;
2099 adapter->linkstate = LINK_DOWN;
2100 adapter->devflags_prev = 0;
2101 DBG_ERROR("sxg: %s (%s) set adapter[%p] state to ADAPT_DOWN(%d)\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07002102 __func__, dev->name, adapter, adapter->state);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002103
Harvey Harrisone88bd232008-10-17 14:46:10 -07002104 DBG_ERROR("sxg: %s (%s) EXIT\n", __func__, dev->name);
2105 DBG_ERROR("sxg: %s EXIT\n", __func__);
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05302106
2107 /* Disable interrupts */
2108 SXG_DISABLE_ALL_INTERRUPTS(adapter);
2109
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302110 netif_carrier_off(dev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002111 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05302112
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302113 sxg_deregister_interrupt(adapter);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002114 return (STATUS_SUCCESS);
2115}
2116
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302117static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002118{
2119 ASSERT(rq);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302120/* DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __func__, cmd, rq, dev);*/
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002121 switch (cmd) {
2122 case SIOCSLICSETINTAGG:
2123 {
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302124 /* struct adapter_t *adapter = (struct adapter_t *)
2125 * netdev_priv(dev);
2126 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002127 u32 data[7];
2128 u32 intagg;
2129
2130 if (copy_from_user(data, rq->ifr_data, 28)) {
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302131				DBG_ERROR("copy_from_user FAILED getting "
 2132					  "initial params\n");
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002133 return -EFAULT;
2134 }
2135 intagg = data[0];
2136 printk(KERN_EMERG
2137 "%s: set interrupt aggregation to %d\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07002138 __func__, intagg);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002139 return 0;
2140 }
2141
2142 default:
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302143 /* DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __func__, cmd); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002144 return -EOPNOTSUPP;
2145 }
2146 return 0;
2147}
2148
2149#define NORMAL_ETHFRAME 0
2150
2151/*
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002152 * sxg_send_packets - Send a skb packet
2153 *
2154 * Arguments:
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302155 * skb - The packet to send
2156 * dev - Our linux net device that refs our adapter
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002157 *
2158 * Return:
 2159 *	NETDEV_TX_OK, or NETDEV_TX_BUSY if the send failed (XXXTODO refer to e1000 driver)
2160 */
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302161static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002162{
J.R. Mauro73b07062008-10-28 18:42:02 -04002163 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002164 u32 status = STATUS_SUCCESS;
2165
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302166 /*
2167 * DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __FUNCTION__,
2168 * skb);
2169 */
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05302170
J.R. Maurob243c4a2008-10-20 19:28:58 -04002171 /* Check the adapter state */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002172 switch (adapter->State) {
2173 case SXG_STATE_INITIALIZING:
2174 case SXG_STATE_HALTED:
2175 case SXG_STATE_SHUTDOWN:
J.R. Maurob243c4a2008-10-20 19:28:58 -04002176 ASSERT(0); /* unexpected */
2177 /* fall through */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002178 case SXG_STATE_RESETTING:
2179 case SXG_STATE_SLEEP:
2180 case SXG_STATE_BOOTDIAG:
2181 case SXG_STATE_DIAG:
2182 case SXG_STATE_HALTING:
2183 status = STATUS_FAILURE;
2184 break;
2185 case SXG_STATE_RUNNING:
2186 if (adapter->LinkState != SXG_LINK_UP) {
2187 status = STATUS_FAILURE;
2188 }
2189 break;
2190 default:
2191 ASSERT(0);
2192 status = STATUS_FAILURE;
2193 }
2194 if (status != STATUS_SUCCESS) {
2195 goto xmit_fail;
2196 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04002197 /* send a packet */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002198 status = sxg_transmit_packet(adapter, skb);
2199 if (status == STATUS_SUCCESS) {
2200 goto xmit_done;
2201 }
2202
2203 xmit_fail:
J.R. Maurob243c4a2008-10-20 19:28:58 -04002204 /* reject & complete all the packets if they cant be sent */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002205 if (status != STATUS_SUCCESS) {
2206#if XXXTODO
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302207 /* sxg_send_packets_fail(adapter, skb, status); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002208#else
2209 SXG_DROP_DUMB_SEND(adapter, skb);
2210 adapter->stats.tx_dropped++;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302211 return NETDEV_TX_BUSY;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002212#endif
2213 }
Harvey Harrisone88bd232008-10-17 14:46:10 -07002214 DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __func__,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002215 status);
2216
2217 xmit_done:
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302218 return NETDEV_TX_OK;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002219}
2220
2221/*
2222 * sxg_transmit_packet
2223 *
2224 * This function transmits a single packet.
2225 *
2226 * Arguments -
2227 * adapter - Pointer to our adapter structure
2228 * skb - The packet to be sent
2229 *
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302230 * Return - STATUS of send
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002231 */
J.R. Mauro73b07062008-10-28 18:42:02 -04002232static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002233{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302234 struct sxg_x64_sgl *pSgl;
2235 struct sxg_scatter_gather *SxgSgl;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302236 unsigned long sgl_flags;
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05302237 /* void *SglBuffer; */
2238 /* u32 SglBufferLength; */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002239
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302240 /*
2241 * The vast majority of work is done in the shared
2242 * sxg_dumb_sgl routine.
2243 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002244 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSend",
2245 adapter, skb, 0, 0);
2246
J.R. Maurob243c4a2008-10-20 19:28:58 -04002247 /* Allocate a SGL buffer */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302248 SXG_GET_SGL_BUFFER(adapter, SxgSgl, 0);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002249 if (!SxgSgl) {
2250 adapter->Stats.NoSglBuf++;
Mithlesh Thukral54aed112009-01-19 20:27:17 +05302251 adapter->stats.tx_errors++;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002252 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "SndPktF1",
2253 adapter, skb, 0, 0);
2254 return (STATUS_RESOURCES);
2255 }
2256 ASSERT(SxgSgl->adapter == adapter);
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05302257 /*SglBuffer = SXG_SGL_BUFFER(SxgSgl);
2258 SglBufferLength = SXG_SGL_BUF_SIZE; */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002259 SxgSgl->VlanTag.VlanTci = 0;
2260 SxgSgl->VlanTag.VlanTpid = 0;
2261 SxgSgl->Type = SXG_SGL_DUMB;
2262 SxgSgl->DumbPacket = skb;
2263 pSgl = NULL;
2264
J.R. Maurob243c4a2008-10-20 19:28:58 -04002265 /* Call the common sxg_dumb_sgl routine to complete the send. */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302266 return (sxg_dumb_sgl(pSgl, SxgSgl));
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002267}
2268
2269/*
2270 * sxg_dumb_sgl
2271 *
2272 * Arguments:
2273 * pSgl -
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302274 * SxgSgl - struct sxg_scatter_gather
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002275 *
2276 * Return Value:
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302277 * Status of send operation.
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002278 */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302279static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302280 struct sxg_scatter_gather *SxgSgl)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002281{
J.R. Mauro73b07062008-10-28 18:42:02 -04002282 struct adapter_t *adapter = SxgSgl->adapter;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002283 struct sk_buff *skb = SxgSgl->DumbPacket;
J.R. Maurob243c4a2008-10-20 19:28:58 -04002284 /* For now, all dumb-nic sends go on RSS queue zero */
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302285 struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
2286 struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
2287 struct sxg_cmd *XmtCmd = NULL;
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302288 /* u32 Index = 0; */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002289 u32 DataLength = skb->len;
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302290 /* unsigned int BufLen; */
2291 /* u32 SglOffset; */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002292 u64 phys_addr;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302293 unsigned long flags;
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302294	unsigned long queue_id = 0;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002295
2296 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl",
2297 pSgl, SxgSgl, 0, 0);
2298
J.R. Maurob243c4a2008-10-20 19:28:58 -04002299 /* Set aside a pointer to the sgl */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002300 SxgSgl->pSgl = pSgl;
2301
J.R. Maurob243c4a2008-10-20 19:28:58 -04002302 /* Sanity check that our SGL format is as we expect. */
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302303 ASSERT(sizeof(struct sxg_x64_sge) == sizeof(struct sxg_x64_sge));
J.R. Maurob243c4a2008-10-20 19:28:58 -04002304 /* Shouldn't be a vlan tag on this frame */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002305 ASSERT(SxgSgl->VlanTag.VlanTci == 0);
2306 ASSERT(SxgSgl->VlanTag.VlanTpid == 0);
2307
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302308 /*
2309 * From here below we work with the SGL placed in our
2310 * buffer.
2311 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002312
2313 SxgSgl->Sgl.NumberOfElements = 1;
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302314 /*
2315 * Set ucode Queue ID based on bottom bits of destination TCP port.
2316 * This Queue ID splits slowpath/dumb-nic packet processing across
2317 * multiple threads on the card to improve performance. It is split
2318 * using the TCP port to avoid out-of-order packets that can result
2319 * from multithreaded processing. We use the destination port because
2320 * we expect to be run on a server, so in nearly all cases the local
2321 * port is likely to be constant (well-known server port) and the
2322 * remote port is likely to be random. The exception to this is iSCSI,
2323 * in which case we use the sport instead. Note
2324 * that original attempt at XOR'ing source and dest port resulted in
2325 * poor balance on NTTTCP/iometer applications since they tend to
2326 * line up (even-even, odd-odd..).
2327 */
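	/*
	 * Illustration only, assuming SXG_LARGE_SEND_QUEUE_MASK is 0x3
	 * (four queues): a frame to destination TCP port 50007 would get
	 * queue_id = 50007 & 0x3 = 3, while an iSCSI frame (destination
	 * port ISCSI_PORT) would use its source port instead.
	 */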
2328
2329 if (skb->protocol == htons(ETH_P_IP)) {
2330 struct iphdr *ip;
2331
2332 ip = ip_hdr(skb);
 2333		if ((ip->protocol == IPPROTO_TCP) && (DataLength >= sizeof(
 2334			struct tcphdr))) {
2335 queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ?
2336 (ntohs (tcp_hdr(skb)->source) &
2337 SXG_LARGE_SEND_QUEUE_MASK):
2338 (ntohs(tcp_hdr(skb)->dest) &
2339 SXG_LARGE_SEND_QUEUE_MASK));
2340 }
2341 } else if (skb->protocol == htons(ETH_P_IPV6)) {
 2342		if ((ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) && (DataLength >=
 2343			sizeof(struct tcphdr))) {
2344 queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ?
2345 (ntohs (tcp_hdr(skb)->source) &
2346 SXG_LARGE_SEND_QUEUE_MASK):
2347 (ntohs(tcp_hdr(skb)->dest) &
2348 SXG_LARGE_SEND_QUEUE_MASK));
2349 }
2350 }
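	/*
	 * Worked example (assuming SXG_LARGE_SEND_QUEUE_MASK == 0x3, which
	 * matches the four send queues noted below): a frame with TCP dest
	 * port 80 hashes to queue 80 & 0x3 = 0 and dest port 443 to queue 3,
	 * while an iSCSI frame (dest port ISCSI_PORT, conventionally 3260)
	 * uses its source port instead, so a source port of 51235 lands on
	 * queue 51235 & 0x3 = 3.
	 */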
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002351
J.R. Maurob243c4a2008-10-20 19:28:58 -04002352 /* Grab the spinlock and acquire a command */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302353 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002354 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2355 if (XmtCmd == NULL) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302356 /*
2357 * Call sxg_complete_slow_send to see if we can
2358 * free up any XmtRingZero entries and then try again
2359 */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302360
2361 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2362 sxg_complete_slow_send(adapter, 0);
2363 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002364 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2365 if (XmtCmd == NULL) {
2366 adapter->Stats.XmtZeroFull++;
2367 goto abortcmd;
2368 }
2369 }
2370 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd",
2371 XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
J.R. Maurob243c4a2008-10-20 19:28:58 -04002372 /* Update stats */
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05302373 adapter->stats.tx_packets++;
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05302374 adapter->stats.tx_bytes += DataLength;
J.R. Maurob243c4a2008-10-20 19:28:58 -04002375#if XXXTODO /* Stats stuff */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002376 if (SXG_MULTICAST_PACKET(EtherHdr)) {
2377 if (SXG_BROADCAST_PACKET(EtherHdr)) {
2378 adapter->Stats.DumbXmtBcastPkts++;
2379 adapter->Stats.DumbXmtBcastBytes += DataLength;
2380 } else {
2381 adapter->Stats.DumbXmtMcastPkts++;
2382 adapter->Stats.DumbXmtMcastBytes += DataLength;
2383 }
2384 } else {
2385 adapter->Stats.DumbXmtUcastPkts++;
2386 adapter->Stats.DumbXmtUcastBytes += DataLength;
2387 }
2388#endif
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302389 /*
2390 * Fill in the command
2391 * Copy out the first SGE to the command and adjust for offset
2392 */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302393 phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
J.R. Mauro5c7514e2008-10-05 20:38:52 -04002394 PCI_DMA_TODEVICE);
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05302395 memset(XmtCmd, '\0', sizeof(*XmtCmd));
2396 XmtCmd->Buffer.FirstSgeAddress = phys_addr;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002397 XmtCmd->Buffer.FirstSgeLength = DataLength;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002398 XmtCmd->Buffer.SgeOffset = 0;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002399 XmtCmd->Buffer.TotalLength = DataLength;
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05302400 XmtCmd->SgEntries = 1;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002401 XmtCmd->Flags = 0;
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302402 /*
2403	 * Advance the transmit command descriptor by 1.
2404 * NOTE - See comments in SxgTcpOutput where we write
2405 * to the XmtCmd register regarding CPU ID values and/or
2406 * multiple commands.
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302407	 * Top 16 bits specify the queue_id. See the comments about queue_id above.
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302408 */
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302409 /* Four queues at the moment */
2410 ASSERT((queue_id & ~SXG_LARGE_SEND_QUEUE_MASK) == 0);
2411 WRITE_REG(adapter->UcodeRegs[0].XmtCmd, ((queue_id << 16) | 1), TRUE);
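	/*
	 * For example, queue_id == 3 writes 0x00030001 here: post one
	 * command, to be processed on ucode queue 3.
	 */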
J.R. Maurob243c4a2008-10-20 19:28:58 -04002412 adapter->Stats.XmtQLen++; /* Stats within lock */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302413 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002414 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2",
2415 XmtCmd, pSgl, SxgSgl, 0);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302416 return STATUS_SUCCESS;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002417
2418 abortcmd:
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302419 /*
2420 * NOTE - Only jump to this label AFTER grabbing the
2421 * XmtZeroLock, and DO NOT DROP IT between the
2422 * command allocation and the following abort.
2423 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002424 if (XmtCmd) {
2425 SXG_ABORT_CMD(XmtRingInfo);
2426 }
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302427 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002428
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302429/*
2430 * failsgl:
2431 * Jump to this label if failure occurs before the
2432 * XmtZeroLock is grabbed
2433 */
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05302434 adapter->stats.tx_errors++;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002435 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal",
2436 pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302437 /* SxgSgl->DumbPacket is the skb */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302438	/* SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket); */
Mithlesh Thukral54aed112009-01-19 20:27:17 +05302439
2440 return STATUS_FAILURE;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002441}
2442
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002443/*
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302444 * Link management functions
2445 *
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002446 * sxg_initialize_link - Initialize the link (MAC, PHY and XGXS/serdes)
2447 *
2448 * Arguments -
2449 * adapter - A pointer to our adapter structure
2450 *
2451 * Return
2452 * status
2453 */
J.R. Mauro73b07062008-10-28 18:42:02 -04002454static int sxg_initialize_link(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002455{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302456 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002457 u32 Value;
2458 u32 ConfigData;
2459 u32 MaxFrame;
2460 int status;
2461
2462 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitLink",
2463 adapter, 0, 0, 0);
2464
J.R. Maurob243c4a2008-10-20 19:28:58 -04002465 /* Reset PHY and XGXS module */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002466 WRITE_REG(HwRegs->LinkStatus, LS_SERDES_POWER_DOWN, TRUE);
2467
J.R. Maurob243c4a2008-10-20 19:28:58 -04002468 /* Reset transmit configuration register */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002469 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_RESET, TRUE);
2470
J.R. Maurob243c4a2008-10-20 19:28:58 -04002471 /* Reset receive configuration register */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002472 WRITE_REG(HwRegs->RcvConfig, RCV_CONFIG_RESET, TRUE);
2473
J.R. Maurob243c4a2008-10-20 19:28:58 -04002474 /* Reset all MAC modules */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002475 WRITE_REG(HwRegs->MacConfig0, AXGMAC_CFG0_SUB_RESET, TRUE);
2476
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302477 /*
2478 * Link address 0
2479 * XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f)
2480	 * is stored with the first nibble (0a) in byte 0
2481	 * of the MAC address. Possibly reversed?
2482 */
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05302483 Value = *(u32 *) adapter->macaddr;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002484 WRITE_REG(HwRegs->LinkAddress0Low, Value, TRUE);
J.R. Maurob243c4a2008-10-20 19:28:58 -04002485 /* also write the MAC address to the MAC. Endian is reversed. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002486 WRITE_REG(HwRegs->MacAddressLow, ntohl(Value), TRUE);
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05302487	Value = (*(u16 *) &adapter->macaddr[4] & 0x0000FFFF);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002488 WRITE_REG(HwRegs->LinkAddress0High, Value | LINK_ADDRESS_ENABLE, TRUE);
J.R. Maurob243c4a2008-10-20 19:28:58 -04002489 /* endian swap for the MAC (put high bytes in bits [31:16], swapped) */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002490 Value = ntohl(Value);
2491 WRITE_REG(HwRegs->MacAddressHigh, Value, TRUE);
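	/*
	 * Worked example, assuming a little-endian host and the MAC address
	 * 0a:0b:0c:0d:0e:0f used above: the u32 load gives 0x0d0c0b0a for
	 * LinkAddress0Low and ntohl() turns it into 0x0a0b0c0d for
	 * MacAddressLow; the u16 load gives 0x0f0e, which is written (OR'd
	 * with LINK_ADDRESS_ENABLE) to LinkAddress0High, and ntohl() moves
	 * it into the upper half, 0x0e0f0000, for MacAddressHigh.
	 */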
J.R. Maurob243c4a2008-10-20 19:28:58 -04002492 /* Link address 1 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002493 WRITE_REG(HwRegs->LinkAddress1Low, 0, TRUE);
2494 WRITE_REG(HwRegs->LinkAddress1High, 0, TRUE);
J.R. Maurob243c4a2008-10-20 19:28:58 -04002495 /* Link address 2 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002496 WRITE_REG(HwRegs->LinkAddress2Low, 0, TRUE);
2497 WRITE_REG(HwRegs->LinkAddress2High, 0, TRUE);
J.R. Maurob243c4a2008-10-20 19:28:58 -04002498 /* Link address 3 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002499 WRITE_REG(HwRegs->LinkAddress3Low, 0, TRUE);
2500 WRITE_REG(HwRegs->LinkAddress3High, 0, TRUE);
2501
J.R. Maurob243c4a2008-10-20 19:28:58 -04002502 /* Enable MAC modules */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002503 WRITE_REG(HwRegs->MacConfig0, 0, TRUE);
2504
J.R. Maurob243c4a2008-10-20 19:28:58 -04002505 /* Configure MAC */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302506 WRITE_REG(HwRegs->MacConfig1, (
2507 /* Allow sending of pause */
2508 AXGMAC_CFG1_XMT_PAUSE |
2509 /* Enable XMT */
2510 AXGMAC_CFG1_XMT_EN |
2511 /* Enable detection of pause */
2512 AXGMAC_CFG1_RCV_PAUSE |
2513 /* Enable receive */
2514 AXGMAC_CFG1_RCV_EN |
2515 /* short frame detection */
2516 AXGMAC_CFG1_SHORT_ASSERT |
2517 /* Verify frame length */
2518 AXGMAC_CFG1_CHECK_LEN |
2519 /* Generate FCS */
2520 AXGMAC_CFG1_GEN_FCS |
2521 /* Pad frames to 64 bytes */
2522 AXGMAC_CFG1_PAD_64),
2523 TRUE);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002524
J.R. Maurob243c4a2008-10-20 19:28:58 -04002525 /* Set AXGMAC max frame length if jumbo. Not needed for standard MTU */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002526 if (adapter->JumboEnabled) {
2527 WRITE_REG(HwRegs->MacMaxFrameLen, AXGMAC_MAXFRAME_JUMBO, TRUE);
2528 }
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302529 /*
2530 * AMIIM Configuration Register -
2531 * The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion
2532 * (bottom bits) of this register is used to determine the MDC frequency
2533 * as specified in the A-XGMAC Design Document. This value must not be
2534 * zero. The following value (62 or 0x3E) is based on our MAC transmit
2535 * clock frequency (MTCLK) of 312.5 MHz. Given a maximum MDIO clock
2536 * frequency of 2.5 MHz (see the PHY spec), we get:
2537 * 312.5/(2*(X+1)) < 2.5 ==> X = 62.
2538 * This value happens to be the default value for this register, so we
2539 * really don't have to do this.
2540 */
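	/*
	 * Checking the arithmetic: MDC = MTCLK / (2 * (X + 1)) must stay at
	 * or below 2.5 MHz, so 2 * (X + 1) >= 312.5 / 2.5 = 125, i.e.
	 * X >= 61.5. The smallest integer value is X = 62 (0x3E), which
	 * gives MDC = 312.5 / 126, roughly 2.48 MHz.
	 */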
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002541 WRITE_REG(HwRegs->MacAmiimConfig, 0x0000003E, TRUE);
2542
J.R. Maurob243c4a2008-10-20 19:28:58 -04002543 /* Power up and enable PHY and XAUI/XGXS/Serdes logic */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002544 WRITE_REG(HwRegs->LinkStatus,
2545 (LS_PHY_CLR_RESET |
2546 LS_XGXS_ENABLE |
2547 LS_XGXS_CTL | LS_PHY_CLK_EN | LS_ATTN_ALARM), TRUE);
2548 DBG_ERROR("After Power Up and enable PHY in sxg_initialize_link\n");
2549
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302550 /*
2551 * Per information given by Aeluros, wait 100 ms after removing reset.
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302552 * It's not enough to wait for the self-clearing reset bit in reg 0 to
2553 * clear.
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302554 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002555 mdelay(100);
2556
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302557 /* Verify the PHY has come up by checking that the Reset bit has
2558 * cleared.
2559 */
2560 status = sxg_read_mdio_reg(adapter,
2561 MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2562 PHY_PMA_CONTROL1, /* PMA/PMD control register */
2563 &Value);
2564 DBG_ERROR("After sxg_read_mdio_reg Value[%x] fail=%x\n", Value,
2565 (Value & PMA_CONTROL1_RESET));
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002566 if (status != STATUS_SUCCESS)
2567 return (STATUS_FAILURE);
J.R. Maurob243c4a2008-10-20 19:28:58 -04002568 if (Value & PMA_CONTROL1_RESET) /* reset complete if bit is 0 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002569 return (STATUS_FAILURE);
2570
J.R. Maurob243c4a2008-10-20 19:28:58 -04002571 /* The SERDES should be initialized by now - confirm */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002572 READ_REG(HwRegs->LinkStatus, Value);
J.R. Maurob243c4a2008-10-20 19:28:58 -04002573 if (Value & LS_SERDES_DOWN) /* verify SERDES is initialized */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002574 return (STATUS_FAILURE);
2575
J.R. Maurob243c4a2008-10-20 19:28:58 -04002576 /* The XAUI link should also be up - confirm */
2577 if (!(Value & LS_XAUI_LINK_UP)) /* verify XAUI link is up */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002578 return (STATUS_FAILURE);
2579
J.R. Maurob243c4a2008-10-20 19:28:58 -04002580 /* Initialize the PHY */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002581 status = sxg_phy_init(adapter);
2582 if (status != STATUS_SUCCESS)
2583 return (STATUS_FAILURE);
2584
J.R. Maurob243c4a2008-10-20 19:28:58 -04002585 /* Enable the Link Alarm */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302586
2587 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2588 * LASI_CONTROL - LASI control register
2589 * LASI_CTL_LS_ALARM_ENABLE - enable link alarm bit
2590 */
2591 status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2592 LASI_CONTROL,
2593 LASI_CTL_LS_ALARM_ENABLE);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002594 if (status != STATUS_SUCCESS)
2595 return (STATUS_FAILURE);
2596
J.R. Maurob243c4a2008-10-20 19:28:58 -04002597 /* XXXTODO - temporary - verify bit is set */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302598
2599 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2600 * LASI_CONTROL - LASI control register
2601 */
2602 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2603 LASI_CONTROL,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002604 &Value);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302605
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002606 if (status != STATUS_SUCCESS)
2607 return (STATUS_FAILURE);
2608 if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) {
2609 DBG_ERROR("Error! LASI Control Alarm Enable bit not set!\n");
2610 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04002611 /* Enable receive */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002612 MaxFrame = adapter->JumboEnabled ? JUMBOMAXFRAME : ETHERMAXFRAME;
2613 ConfigData = (RCV_CONFIG_ENABLE |
2614 RCV_CONFIG_ENPARSE |
2615 RCV_CONFIG_RCVBAD |
2616 RCV_CONFIG_RCVPAUSE |
2617 RCV_CONFIG_TZIPV6 |
2618 RCV_CONFIG_TZIPV4 |
2619 RCV_CONFIG_HASH_16 |
2620 RCV_CONFIG_SOCKET | RCV_CONFIG_BUFSIZE(MaxFrame));
2621 WRITE_REG(HwRegs->RcvConfig, ConfigData, TRUE);
2622
2623 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_ENABLE, TRUE);
2624
J.R. Maurob243c4a2008-10-20 19:28:58 -04002625 /* Mark the link as down. We'll get a link event when it comes up. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002626 sxg_link_state(adapter, SXG_LINK_DOWN);
2627
2628 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInitLnk",
2629 adapter, 0, 0, 0);
2630 return (STATUS_SUCCESS);
2631}
2632
2633/*
2634 * sxg_phy_init - Initialize the PHY
2635 *
2636 * Arguments -
2637 * adapter - A pointer to our adapter structure
2638 *
2639 * Return
2640 * status
2641 */
J.R. Mauro73b07062008-10-28 18:42:02 -04002642static int sxg_phy_init(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002643{
2644 u32 Value;
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302645 struct phy_ucode *p;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002646 int status;
2647
Harvey Harrisone88bd232008-10-17 14:46:10 -07002648 DBG_ERROR("ENTER %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002649
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302650 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2651 * 0xC205 - PHY ID register (?)
2652 * &Value - XXXTODO - add def
2653 */
2654 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2655 0xC205,
2656 &Value);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002657 if (status != STATUS_SUCCESS)
2658 return (STATUS_FAILURE);
2659
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302660 if (Value == 0x0012) {
2661 /* 0x0012 == AEL2005C PHY(?) - XXXTODO - add def */
2662		DBG_ERROR("AEL2005C PHY detected. "
2663				"Downloading PHY microcode.\n");
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002664
J.R. Maurob243c4a2008-10-20 19:28:58 -04002665 /* Initialize AEL2005C PHY and download PHY microcode */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002666 for (p = PhyUcode; p->Addr != 0xFFFF; p++) {
2667 if (p->Addr == 0) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04002668 /* if address == 0, data == sleep time in ms */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002669 mdelay(p->Data);
2670 } else {
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302671 /* write the given data to the specified address */
2672 status = sxg_write_mdio_reg(adapter,
2673 MIIM_DEV_PHY_PMA,
2674 /* PHY address */
2675 p->Addr,
2676 /* PHY data */
2677 p->Data);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002678 if (status != STATUS_SUCCESS)
2679 return (STATUS_FAILURE);
2680 }
2681 }
2682 }
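	/*
	 * The PhyUcode table above is effectively a small script: an entry
	 * with Addr == 0 inserts a delay of Data milliseconds, any other
	 * entry writes Data to PHY register Addr, and an entry with
	 * Addr == 0xFFFF terminates the download.
	 */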
Harvey Harrisone88bd232008-10-17 14:46:10 -07002683 DBG_ERROR("EXIT %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002684
2685 return (STATUS_SUCCESS);
2686}
2687
2688/*
2689 * sxg_link_event - Process a link event notification from the card
2690 *
2691 * Arguments -
2692 * adapter - A pointer to our adapter structure
2693 *
2694 * Return
2695 * None
2696 */
J.R. Mauro73b07062008-10-28 18:42:02 -04002697static void sxg_link_event(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002698{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302699 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302700 struct net_device *netdev = adapter->netdev;
J.R. Mauro73b07062008-10-28 18:42:02 -04002701 enum SXG_LINK_STATE LinkState;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002702 int status;
2703 u32 Value;
2704
2705 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "LinkEvnt",
2706 adapter, 0, 0, 0);
Harvey Harrisone88bd232008-10-17 14:46:10 -07002707 DBG_ERROR("ENTER %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002708
J.R. Maurob243c4a2008-10-20 19:28:58 -04002709 /* Check the Link Status register. We should have a Link Alarm. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002710 READ_REG(HwRegs->LinkStatus, Value);
2711 if (Value & LS_LINK_ALARM) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302712 /*
2713 * We got a Link Status alarm. First, pause to let the
2714 * link state settle (it can bounce a number of times)
2715 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002716 mdelay(10);
2717
J.R. Maurob243c4a2008-10-20 19:28:58 -04002718 /* Now clear the alarm by reading the LASI status register. */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302719 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */
2720 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2721 /* LASI status register */
2722 LASI_STATUS,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002723 &Value);
2724 if (status != STATUS_SUCCESS) {
2725 DBG_ERROR("Error reading LASI Status MDIO register!\n");
2726 sxg_link_state(adapter, SXG_LINK_DOWN);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302727 /* ASSERT(0); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002728 }
2729 ASSERT(Value & LASI_STATUS_LS_ALARM);
2730
J.R. Maurob243c4a2008-10-20 19:28:58 -04002731 /* Now get and set the link state */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002732 LinkState = sxg_get_link_state(adapter);
2733 sxg_link_state(adapter, LinkState);
2734 DBG_ERROR("SXG: Link Alarm occurred. Link is %s\n",
2735 ((LinkState == SXG_LINK_UP) ? "UP" : "DOWN"));
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302736 if (LinkState == SXG_LINK_UP)
2737 netif_carrier_on(netdev);
2738 else
2739 netif_carrier_off(netdev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002740 } else {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302741 /*
2742 * XXXTODO - Assuming Link Attention is only being generated
2743	 * for the Link Alarm pin (and not for an XAUI Link Status change),
2744	 * then it's impossible to get here. Yet we've gotten here
2745 * twice (under extreme conditions - bouncing the link up and
2746 * down many times a second). Needs further investigation.
2747 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002748 DBG_ERROR("SXG: sxg_link_event: Can't get here!\n");
2749 DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302750 /* ASSERT(0); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002751 }
Harvey Harrisone88bd232008-10-17 14:46:10 -07002752 DBG_ERROR("EXIT %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002753
2754}
2755
2756/*
2757 * sxg_get_link_state - Determine if the link is up or down
2758 *
2759 * Arguments -
2760 * adapter - A pointer to our adapter structure
2761 *
2762 * Return
2763 * Link State
2764 */
J.R. Mauro73b07062008-10-28 18:42:02 -04002765static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002766{
2767 int status;
2768 u32 Value;
2769
Harvey Harrisone88bd232008-10-17 14:46:10 -07002770 DBG_ERROR("ENTER %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002771
2772 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "GetLink",
2773 adapter, 0, 0, 0);
2774
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302775 /*
2776 * Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if
2777 * the following 3 bits (from 3 different MDIO registers) are all true.
2778 */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302779
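	/*
	 * The three bits checked below are PMA/PMD receive signal detect,
	 * PCS 10GBASE-R block lock and XS lane alignment; if any one of
	 * them is clear, the link is reported as down.
	 */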
2780 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */
2781 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2782 /* PMA/PMD Receive Signal Detect register */
2783 PHY_PMA_RCV_DET,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002784 &Value);
2785 if (status != STATUS_SUCCESS)
2786 goto bad;
2787
J.R. Maurob243c4a2008-10-20 19:28:58 -04002788 /* If PMA/PMD receive signal detect is 0, then the link is down */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002789 if (!(Value & PMA_RCV_DETECT))
2790 return (SXG_LINK_DOWN);
2791
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302792 /* MIIM_DEV_PHY_PCS - PHY PCS module */
2793 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS,
2794 /* PCS 10GBASE-R Status 1 register */
2795 PHY_PCS_10G_STATUS1,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002796 &Value);
2797 if (status != STATUS_SUCCESS)
2798 goto bad;
2799
J.R. Maurob243c4a2008-10-20 19:28:58 -04002800 /* If PCS is not locked to receive blocks, then the link is down */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002801 if (!(Value & PCS_10B_BLOCK_LOCK))
2802 return (SXG_LINK_DOWN);
2803
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302804 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS,/* PHY XS module */
2805 /* XS Lane Status register */
2806 PHY_XS_LANE_STATUS,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002807 &Value);
2808 if (status != STATUS_SUCCESS)
2809 goto bad;
2810
J.R. Maurob243c4a2008-10-20 19:28:58 -04002811 /* If XS transmit lanes are not aligned, then the link is down */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002812 if (!(Value & XS_LANE_ALIGN))
2813 return (SXG_LINK_DOWN);
2814
J.R. Maurob243c4a2008-10-20 19:28:58 -04002815 /* All 3 bits are true, so the link is up */
Harvey Harrisone88bd232008-10-17 14:46:10 -07002816 DBG_ERROR("EXIT %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002817
2818 return (SXG_LINK_UP);
2819
2820 bad:
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302821 /* An error occurred reading an MDIO register. This shouldn't happen. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002822 DBG_ERROR("Error reading an MDIO register!\n");
2823 ASSERT(0);
2824 return (SXG_LINK_DOWN);
2825}
2826
J.R. Mauro73b07062008-10-28 18:42:02 -04002827static void sxg_indicate_link_state(struct adapter_t *adapter,
2828 enum SXG_LINK_STATE LinkState)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002829{
2830 if (adapter->LinkState == SXG_LINK_UP) {
2831 DBG_ERROR("%s: LINK now UP, call netif_start_queue\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07002832 __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002833 netif_start_queue(adapter->netdev);
2834 } else {
2835 DBG_ERROR("%s: LINK now DOWN, call netif_stop_queue\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07002836 __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002837 netif_stop_queue(adapter->netdev);
2838 }
2839}
2840
2841/*
2842 * sxg_link_state - Set the link state and if necessary, indicate.
2843 * This routine the central point of processing for all link state changes.
2844 * Nothing else in the driver should alter the link state or perform
2845 * link state indications
2846 *
2847 * Arguments -
2848 * adapter - A pointer to our adapter structure
2849 * LinkState - The link state
2850 *
2851 * Return
2852 * None
2853 */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302854static void sxg_link_state(struct adapter_t *adapter,
2855 enum SXG_LINK_STATE LinkState)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002856{
2857 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "LnkINDCT",
2858 adapter, LinkState, adapter->LinkState, adapter->State);
2859
Harvey Harrisone88bd232008-10-17 14:46:10 -07002860 DBG_ERROR("ENTER %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002861
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302862 /*
2863 * Hold the adapter lock during this routine. Maybe move
2864 * the lock to the caller.
2865 */
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05302866 /* IMP TODO : Check if we can survive without taking this lock */
2867	/* spin_lock(&adapter->AdapterLock); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002868 if (LinkState == adapter->LinkState) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04002869		/* Nothing changed. */
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05302870		/* spin_unlock(&adapter->AdapterLock); */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302871 DBG_ERROR("EXIT #0 %s. Link status = %d\n",
2872 __func__, LinkState);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002873 return;
2874 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04002875 /* Save the adapter state */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002876 adapter->LinkState = LinkState;
2877
J.R. Maurob243c4a2008-10-20 19:28:58 -04002878 /* Drop the lock and indicate link state */
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05302879	/* spin_unlock(&adapter->AdapterLock); */
Harvey Harrisone88bd232008-10-17 14:46:10 -07002880 DBG_ERROR("EXIT #1 %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002881
2882 sxg_indicate_link_state(adapter, LinkState);
2883}
2884
2885/*
2886 * sxg_write_mdio_reg - Write to a register on the MDIO bus
2887 *
2888 * Arguments -
2889 * adapter - A pointer to our adapter structure
2890 * DevAddr - MDIO device number being addressed
2891 * RegAddr - register address for the specified MDIO device
2892 * Value - value to write to the MDIO register
2893 *
2894 * Return
2895 * status
2896 */
J.R. Mauro73b07062008-10-28 18:42:02 -04002897static int sxg_write_mdio_reg(struct adapter_t *adapter,
J.R. Mauro5c7514e2008-10-05 20:38:52 -04002898 u32 DevAddr, u32 RegAddr, u32 Value)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002899{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302900 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302901 /* Address operation (written to MIIM field reg) */
2902 u32 AddrOp;
2903 /* Write operation (written to MIIM field reg) */
2904 u32 WriteOp;
2905 u32 Cmd;/* Command (written to MIIM command reg) */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002906 u32 ValueRead;
2907 u32 Timeout;
2908
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302909 /* DBG_ERROR("ENTER %s\n", __func__); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002910
2911 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
2912 adapter, 0, 0, 0);
2913
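	/*
	 * MDIO access on this MAC is a two-step sequence: an address
	 * operation first latches the device and register address, then a
	 * separate write (or read) operation moves the data. Each step is
	 * started through the AMIIM command register and polled to
	 * completion via the AMIIM indicator register below.
	 */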
J.R. Maurob243c4a2008-10-20 19:28:58 -04002914 /* Ensure values don't exceed field width */
2915 DevAddr &= 0x001F; /* 5-bit field */
2916 RegAddr &= 0xFFFF; /* 16-bit field */
2917 Value &= 0xFFFF; /* 16-bit field */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002918
J.R. Maurob243c4a2008-10-20 19:28:58 -04002919 /* Set MIIM field register bits for an MIIM address operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002920 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2921 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2922 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2923 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
2924
J.R. Maurob243c4a2008-10-20 19:28:58 -04002925 /* Set MIIM field register bits for an MIIM write operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002926 WriteOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2927 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2928 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2929 (MIIM_OP_WRITE << AXGMAC_AMIIM_FIELD_OP_SHIFT) | Value;
2930
J.R. Maurob243c4a2008-10-20 19:28:58 -04002931 /* Set MIIM command register bits to execute an MIIM command */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002932 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
2933
J.R. Maurob243c4a2008-10-20 19:28:58 -04002934 /* Reset the command register command bit (in case it's not 0) */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002935 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2936
J.R. Maurob243c4a2008-10-20 19:28:58 -04002937 /* MIIM write to set the address of the specified MDIO register */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002938 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
2939
J.R. Maurob243c4a2008-10-20 19:28:58 -04002940 /* Write to MIIM Command Register to execute to address operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002941 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2942
J.R. Maurob243c4a2008-10-20 19:28:58 -04002943 /* Poll AMIIM Indicator register to wait for completion */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002944 Timeout = SXG_LINK_TIMEOUT;
2945 do {
J.R. Maurob243c4a2008-10-20 19:28:58 -04002946 udelay(100); /* Timeout in 100us units */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002947 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2948 if (--Timeout == 0) {
2949 return (STATUS_FAILURE);
2950 }
2951 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2952
J.R. Maurob243c4a2008-10-20 19:28:58 -04002953 /* Reset the command register command bit */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002954 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2955
J.R. Maurob243c4a2008-10-20 19:28:58 -04002956 /* MIIM write to set up an MDIO write operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002957 WRITE_REG(HwRegs->MacAmiimField, WriteOp, TRUE);
2958
J.R. Maurob243c4a2008-10-20 19:28:58 -04002959 /* Write to MIIM Command Register to execute the write operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002960 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2961
J.R. Maurob243c4a2008-10-20 19:28:58 -04002962 /* Poll AMIIM Indicator register to wait for completion */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002963 Timeout = SXG_LINK_TIMEOUT;
2964 do {
J.R. Maurob243c4a2008-10-20 19:28:58 -04002965 udelay(100); /* Timeout in 100us units */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002966 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2967 if (--Timeout == 0) {
2968 return (STATUS_FAILURE);
2969 }
2970 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2971
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302972 /* DBG_ERROR("EXIT %s\n", __func__); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002973
2974 return (STATUS_SUCCESS);
2975}
2976
2977/*
2978 * sxg_read_mdio_reg - Read a register on the MDIO bus
2979 *
2980 * Arguments -
2981 * adapter - A pointer to our adapter structure
2982 * DevAddr - MDIO device number being addressed
2983 * RegAddr - register address for the specified MDIO device
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302984 * pValue - pointer to where to put data read from the MDIO register
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002985 *
2986 * Return
2987 * status
2988 */
J.R. Mauro73b07062008-10-28 18:42:02 -04002989static int sxg_read_mdio_reg(struct adapter_t *adapter,
J.R. Mauro5c7514e2008-10-05 20:38:52 -04002990 u32 DevAddr, u32 RegAddr, u32 *pValue)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002991{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302992 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302993 u32 AddrOp; /* Address operation (written to MIIM field reg) */
2994 u32 ReadOp; /* Read operation (written to MIIM field reg) */
2995 u32 Cmd; /* Command (written to MIIM command reg) */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002996 u32 ValueRead;
2997 u32 Timeout;
2998
2999	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RdMDIO",
3000 adapter, 0, 0, 0);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303001	DBG_ERROR("ENTER %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003002
J.R. Maurob243c4a2008-10-20 19:28:58 -04003003 /* Ensure values don't exceed field width */
3004 DevAddr &= 0x001F; /* 5-bit field */
3005 RegAddr &= 0xFFFF; /* 16-bit field */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003006
J.R. Maurob243c4a2008-10-20 19:28:58 -04003007 /* Set MIIM field register bits for an MIIM address operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003008 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3009 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3010 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3011 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
3012
J.R. Maurob243c4a2008-10-20 19:28:58 -04003013 /* Set MIIM field register bits for an MIIM read operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003014 ReadOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3015 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3016 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3017 (MIIM_OP_READ << AXGMAC_AMIIM_FIELD_OP_SHIFT);
3018
J.R. Maurob243c4a2008-10-20 19:28:58 -04003019 /* Set MIIM command register bits to execute an MIIM command */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003020 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
3021
J.R. Maurob243c4a2008-10-20 19:28:58 -04003022 /* Reset the command register command bit (in case it's not 0) */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003023 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3024
J.R. Maurob243c4a2008-10-20 19:28:58 -04003025 /* MIIM write to set the address of the specified MDIO register */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003026 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
3027
J.R. Maurob243c4a2008-10-20 19:28:58 -04003028 /* Write to MIIM Command Register to execute to address operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003029 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3030
J.R. Maurob243c4a2008-10-20 19:28:58 -04003031 /* Poll AMIIM Indicator register to wait for completion */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003032 Timeout = SXG_LINK_TIMEOUT;
3033 do {
J.R. Maurob243c4a2008-10-20 19:28:58 -04003034 udelay(100); /* Timeout in 100us units */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003035 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3036 if (--Timeout == 0) {
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05303037			DBG_ERROR("EXIT %s with STATUS_FAILURE 1\n", __func__);
3038
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003039 return (STATUS_FAILURE);
3040 }
3041 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3042
J.R. Maurob243c4a2008-10-20 19:28:58 -04003043 /* Reset the command register command bit */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003044 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3045
J.R. Maurob243c4a2008-10-20 19:28:58 -04003046 /* MIIM write to set up an MDIO register read operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003047 WRITE_REG(HwRegs->MacAmiimField, ReadOp, TRUE);
3048
J.R. Maurob243c4a2008-10-20 19:28:58 -04003049 /* Write to MIIM Command Register to execute the read operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003050 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3051
J.R. Maurob243c4a2008-10-20 19:28:58 -04003052 /* Poll AMIIM Indicator register to wait for completion */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003053 Timeout = SXG_LINK_TIMEOUT;
3054 do {
J.R. Maurob243c4a2008-10-20 19:28:58 -04003055 udelay(100); /* Timeout in 100us units */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003056 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3057 if (--Timeout == 0) {
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05303058			DBG_ERROR("EXIT %s with STATUS_FAILURE 2\n", __func__);
3059
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003060 return (STATUS_FAILURE);
3061 }
3062 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3063
J.R. Maurob243c4a2008-10-20 19:28:58 -04003064 /* Read the MDIO register data back from the field register */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003065 READ_REG(HwRegs->MacAmiimField, *pValue);
J.R. Maurob243c4a2008-10-20 19:28:58 -04003066 *pValue &= 0xFFFF; /* data is in the lower 16 bits */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003067
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303068	DBG_ERROR("EXIT %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003069
3070 return (STATUS_SUCCESS);
3071}
3072
3073/*
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003074 * Functions to obtain the CRC corresponding to the destination mac address.
3075 * This is a standard ethernet CRC in that it is a 32-bit, reflected CRC using
3076 * the polynomial:
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303077 * x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 + x^5
3078 * + x^4 + x^2 + x^1.
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003079 *
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303080 * After the CRC for the 6 bytes is generated (but before the value is
3081 * complemented), we must then transpose the value and return bits 30-23.
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003082 */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303083static u32 sxg_crc_table[256];	/* Table of CRCs for all possible byte values */
 3084static u32 sxg_crc_init;	/* Is the table initialized? */
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003085
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303086/* Construct the CRC32 table */
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003087static void sxg_mcast_init_crc32(void)
3088{
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303089	u32 c;		/* CRC shift register */
 3090	u32 e = 0;	/* Polynomial XOR pattern */
3091 int i; /* counter */
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003092 int k; /* byte being shifted into crc */
3093
3094 static int p[] = { 0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26 };
3095
3096 for (i = 0; i < sizeof(p) / sizeof(int); i++) {
3097 e |= 1L << (31 - p[i]);
3098 }
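	/*
	 * The loop above builds e == 0xEDB88320, the reflected (bit-reversed)
	 * form of the CRC-32 polynomial listed in the comment block above.
	 */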
3099
3100 for (i = 1; i < 256; i++) {
3101 c = i;
3102 for (k = 8; k; k--) {
3103 c = c & 1 ? (c >> 1) ^ e : c >> 1;
3104 }
3105 sxg_crc_table[i] = c;
3106 }
3107}
3108
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003109/*
3110 * Return the MAC hash as described above.
3111 */
3112static unsigned char sxg_mcast_get_mac_hash(char *macaddr)
3113{
3114 u32 crc;
3115 char *p;
3116 int i;
3117 unsigned char machash = 0;
3118
3119 if (!sxg_crc_init) {
3120 sxg_mcast_init_crc32();
3121 sxg_crc_init = 1;
3122 }
3123
3124 crc = 0xFFFFFFFF; /* Preload shift register, per crc-32 spec */
3125 for (i = 0, p = macaddr; i < 6; ++p, ++i) {
3126 crc = (crc >> 8) ^ sxg_crc_table[(crc ^ *p) & 0xFF];
3127 }
3128
3129 /* Return bits 1-8, transposed */
3130 for (i = 1; i < 9; i++) {
3131 machash |= (((crc >> i) & 1) << (8 - i));
3132 }
3133
3134 return (machash);
3135}
3136
J.R. Mauro73b07062008-10-28 18:42:02 -04003137static void sxg_mcast_set_mask(struct adapter_t *adapter)
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003138{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303139 struct sxg_ucode_regs *sxg_regs = adapter->UcodeRegs;
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003140
Mithlesh Thukralb040b072009-01-28 07:08:11 +05303141	DBG_ERROR("%s ENTER (%s) MacFilter[%x] mask[%llx]\n", __func__,
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003142 adapter->netdev->name, (unsigned int)adapter->MacFilter,
3143 adapter->MulticastMask);
3144
3145 if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303146 /*
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303147 * Turn on all multicast addresses. We have to do this for
3148 * promiscuous mode as well as ALLMCAST mode. It saves the
3149		 * Microcode from having to keep state about the MAC configuration.
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003150 */
Mithlesh Thukralb040b072009-01-28 07:08:11 +05303151		/* DBG_ERROR("sxg: %s MacFilter = MAC_ALLMCAST | MAC_PROMISC\n",
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303152		 *	     __func__);
 3153		 */
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003154 WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
3155 WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303156 /* DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high \
3157 * 0xFFFFFFFF\n",__func__, adapter->netdev->name);
3158 */
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003159
3160 } else {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303161 /*
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303162		 * Commit our multicast mask to the SLIC by writing to the
3163 * multicast address mask registers
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003164 */
3165 DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
3166 __func__, adapter->netdev->name,
3167 ((ulong) (adapter->MulticastMask & 0xFFFFFFFF)),
3168 ((ulong)
3169 ((adapter->MulticastMask >> 32) & 0xFFFFFFFF)));
3170
3171 WRITE_REG(sxg_regs->McastLow,
3172 (u32) (adapter->MulticastMask & 0xFFFFFFFF), FLUSH);
3173 WRITE_REG(sxg_regs->McastHigh,
3174 (u32) ((adapter->
3175 MulticastMask >> 32) & 0xFFFFFFFF), FLUSH);
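		/*
		 * For example, a MulticastMask with only bit 42 set writes 0
		 * to McastLow and 0x00000400 to McastHigh.
		 */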
3176 }
3177}
3178
J.R. Mauro73b07062008-10-28 18:42:02 -04003179static void sxg_mcast_set_bit(struct adapter_t *adapter, char *address)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003180{
3181 unsigned char crcpoly;
3182
3183 /* Get the CRC polynomial for the mac address */
3184 crcpoly = sxg_mcast_get_mac_hash(address);
3185
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303186 /*
3187 * We only have space on the SLIC for 64 entries. Lop
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003188 * off the top two bits. (2^6 = 64)
3189 */
3190 crcpoly &= 0x3F;
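	/*
	 * For example, a hash of 0x6A is masked down to 0x2A, so bit 42 of
	 * the 64-bit multicast mask is set below.
	 */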
3191
3192 /* OR in the new bit into our 64 bit mask. */
3193 adapter->MulticastMask |= (u64) 1 << crcpoly;
3194}
Mithlesh Thukralb040b072009-01-28 07:08:11 +05303195
3196/*
3197 * Function takes MAC addresses from dev_mc_list and generates the Mask
3198 */
3199
3200static void sxg_set_mcast_addr(struct adapter_t *adapter)
3201{
3202 struct dev_mc_list *mclist;
3203 struct net_device *dev = adapter->netdev;
3204 int i;
3205
3206 if (adapter->MacFilter & (MAC_ALLMCAST | MAC_MCAST)) {
3207 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3208 i++, mclist = mclist->next) {
3209			sxg_mcast_set_bit(adapter, mclist->da_addr);
3210 }
3211 }
3212 sxg_mcast_set_mask(adapter);
3213}
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003214
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303215static void sxg_mcast_set_list(struct net_device *dev)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003216{
J.R. Mauro73b07062008-10-28 18:42:02 -04003217 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003218
3219 ASSERT(adapter);
Mithlesh Thukral559990c2009-01-30 20:20:19 +05303220 if (dev->flags & IFF_PROMISC)
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05303221 adapter->MacFilter |= MAC_PROMISC;
Mithlesh Thukralb040b072009-01-28 07:08:11 +05303222 if (dev->flags & IFF_MULTICAST)
3223 adapter->MacFilter |= MAC_MCAST;
Mithlesh Thukral559990c2009-01-30 20:20:19 +05303224 if (dev->flags & IFF_ALLMULTI)
Mithlesh Thukralb040b072009-01-28 07:08:11 +05303225 adapter->MacFilter |= MAC_ALLMCAST;
Mithlesh Thukralb040b072009-01-28 07:08:11 +05303226
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05303227	/* XXX handle other flags as well */
Mithlesh Thukralb040b072009-01-28 07:08:11 +05303228 sxg_set_mcast_addr(adapter);
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05303229}
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003230
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303231void sxg_free_sgl_buffers(struct adapter_t *adapter)
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303232{
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303233 struct list_entry *ple;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303234 struct sxg_scatter_gather *Sgl;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003235
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303236 while(!(IsListEmpty(&adapter->AllSglBuffers))) {
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303237 ple = RemoveHeadList(&adapter->AllSglBuffers);
3238 Sgl = container_of(ple, struct sxg_scatter_gather, AllList);
3239 kfree(Sgl);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303240 adapter->AllSglBufferCount--;
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303241 }
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303242}
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303243
3244void sxg_free_rcvblocks(struct adapter_t *adapter)
3245{
3246 u32 i;
3247 void *temp_RcvBlock;
3248 struct list_entry *ple;
3249 struct sxg_rcv_block_hdr *RcvBlockHdr;
3250 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
3251 ASSERT((adapter->state == SXG_STATE_INITIALIZING) ||
3252 (adapter->state == SXG_STATE_HALTING));
3253 while(!(IsListEmpty(&adapter->AllRcvBlocks))) {
3254
3255 ple = RemoveHeadList(&adapter->AllRcvBlocks);
3256 RcvBlockHdr = container_of(ple, struct sxg_rcv_block_hdr, AllList);
3257
3258 if(RcvBlockHdr->VirtualAddress) {
3259 temp_RcvBlock = RcvBlockHdr->VirtualAddress;
3260
3261 for(i=0; i< SXG_RCV_DESCRIPTORS_PER_BLOCK;
3262 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3263 RcvDataBufferHdr =
3264 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
3265 SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
3266 }
3267 }
3268
3269 pci_free_consistent(adapter->pcidev,
3270 SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
3271 RcvBlockHdr->VirtualAddress,
3272 RcvBlockHdr->PhysicalAddress);
3273 adapter->AllRcvBlockCount--;
3274 }
3275 ASSERT(adapter->AllRcvBlockCount == 0);
3276 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk",
3277 adapter, 0, 0, 0);
3278}
3279void sxg_free_mcast_addrs(struct adapter_t *adapter)
3280{
3281 struct sxg_multicast_address *address;
3282 while(adapter->MulticastAddrs) {
3283 address = adapter->MulticastAddrs;
3284 adapter->MulticastAddrs = address->Next;
3285 kfree(address);
3286 }
3287
3288 adapter->MulticastMask= 0;
3289}
3290
3291void sxg_unmap_resources(struct adapter_t *adapter)
3292{
3293 if(adapter->HwRegs) {
3294 iounmap((void *)adapter->HwRegs);
3295 }
3296 if(adapter->UcodeRegs) {
3297 iounmap((void *)adapter->UcodeRegs);
3298 }
3299
3300 ASSERT(adapter->AllRcvBlockCount == 0);
3301 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk",
3302 adapter, 0, 0, 0);
3303}
3304
3305
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303306
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003307/*
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303308 * sxg_free_resources - Free everything allocated in SxgAllocateResources
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003309 *
3310 * Arguments -
3311 * adapter - A pointer to our adapter structure
3312 *
3313 * Return
3314 * none
3315 */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303316void sxg_free_resources(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003317{
3318 u32 RssIds, IsrCount;
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303319 struct net_device *netdev = adapter->netdev;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003320 RssIds = SXG_RSS_CPU_COUNT(adapter);
3321 IsrCount = adapter->MsiEnabled ? RssIds : 1;
3322
3323 if (adapter->BasicAllocations == FALSE) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303324 /*
3325 * No allocations have been made, including spinlocks,
3326 * or listhead initializations. Return.
3327 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003328 return;
3329 }
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303330
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303331 /* Free Irq */
3332 free_irq(adapter->netdev->irq, netdev);
3333
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003334 if (!(IsListEmpty(&adapter->AllRcvBlocks))) {
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303335 sxg_free_rcvblocks(adapter);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003336 }
3337 if (!(IsListEmpty(&adapter->AllSglBuffers))) {
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303338 sxg_free_sgl_buffers(adapter);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003339 }
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303340
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003341 if (adapter->XmtRingZeroIndex) {
3342 pci_free_consistent(adapter->pcidev,
3343 sizeof(u32),
3344 adapter->XmtRingZeroIndex,
3345 adapter->PXmtRingZeroIndex);
3346 }
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303347 if (adapter->Isr) {
3348 pci_free_consistent(adapter->pcidev,
3349 sizeof(u32) * IsrCount,
3350 adapter->Isr, adapter->PIsr);
3351 }
3352
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303353 if (adapter->EventRings) {
3354 pci_free_consistent(adapter->pcidev,
3355 sizeof(struct sxg_event_ring) * RssIds,
3356 adapter->EventRings, adapter->PEventRings);
3357 }
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303358 if (adapter->RcvRings) {
3359 pci_free_consistent(adapter->pcidev,
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303360 sizeof(struct sxg_rcv_ring) * 1,
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303361 adapter->RcvRings,
3362 adapter->PRcvRings);
3363 adapter->RcvRings = NULL;
3364 }
3365
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303366 if(adapter->XmtRings) {
3367 pci_free_consistent(adapter->pcidev,
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303368 sizeof(struct sxg_xmt_ring) * 1,
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303369 adapter->XmtRings,
3370 adapter->PXmtRings);
3371 adapter->XmtRings = NULL;
3372 }
3373
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303374 if (adapter->ucode_stats) {
3375 pci_unmap_single(adapter->pcidev,
3376 sizeof(struct sxg_ucode_stats),
3377 adapter->pucode_stats, PCI_DMA_FROMDEVICE);
3378 adapter->ucode_stats = NULL;
3379 }
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303380
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003381
J.R. Maurob243c4a2008-10-20 19:28:58 -04003382 /* Unmap register spaces */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303383 sxg_unmap_resources(adapter);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003384
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303385 sxg_free_mcast_addrs(adapter);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003386
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003387 adapter->BasicAllocations = FALSE;
3388
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003389}
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003390
3391/*
3392 * sxg_allocate_complete -
3393 *
3394 * This routine is called when a memory allocation has completed.
3395 *
3396 * Arguments -
J.R. Mauro73b07062008-10-28 18:42:02 -04003397 * struct adapter_t * - Our adapter structure
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003398 * VirtualAddress - Memory virtual address
3399 * PhysicalAddress - Memory physical address
3400 * Length - Length of memory allocated (or 0)
3401 * Context - The type of buffer allocated
3402 *
3403 * Return
3404 * None.
3405 */
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303406static int sxg_allocate_complete(struct adapter_t *adapter,
J.R. Mauro5c7514e2008-10-05 20:38:52 -04003407 void *VirtualAddress,
3408 dma_addr_t PhysicalAddress,
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303409 u32 Length, enum sxg_buffer_type Context)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003410{
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303411 int status = 0;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003412 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocCmp",
3413 adapter, VirtualAddress, Length, Context);
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303414 ASSERT(atomic_read(&adapter->pending_allocations));
3415 atomic_dec(&adapter->pending_allocations);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003416
3417 switch (Context) {
3418
3419 case SXG_BUFFER_TYPE_RCV:
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303420 status = sxg_allocate_rcvblock_complete(adapter,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003421 VirtualAddress,
3422 PhysicalAddress, Length);
3423 break;
3424 case SXG_BUFFER_TYPE_SGL:
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303425 sxg_allocate_sgl_buffer_complete(adapter, (struct sxg_scatter_gather *)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003426 VirtualAddress,
3427 PhysicalAddress, Length);
3428 break;
3429 }
3430 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocCmp",
3431 adapter, VirtualAddress, Length, Context);
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303432
3433 return status;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003434}
3435
3436/*
3437 * sxg_allocate_buffer_memory - Shared memory allocation routine used for
3438 * synchronous and asynchronous buffer allocations
3439 *
3440 * Arguments -
3441 * adapter - A pointer to our adapter structure
3442 * Size - block size to allocate
3443 * BufferType - Type of buffer to allocate
3444 *
3445 * Return
3446 * int
3447 */
J.R. Mauro73b07062008-10-28 18:42:02 -04003448static int sxg_allocate_buffer_memory(struct adapter_t *adapter,
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303449 u32 Size, enum sxg_buffer_type BufferType)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003450{
3451 int status;
J.R. Mauro5c7514e2008-10-05 20:38:52 -04003452 void *Buffer;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003453 dma_addr_t pBuffer;
3454
3455 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocMem",
3456 adapter, Size, BufferType, 0);
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303457 /*
3458	 * Count this allocation as pending. Pause processing relies on the
3459	 * pending_allocations count to know when all outstanding
3460	 * allocations have completed.
3461 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003462
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303463 atomic_inc(&adapter->pending_allocations);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003464
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303465 if(BufferType != SXG_BUFFER_TYPE_SGL)
3466 Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer);
3467 else {
3468 Buffer = kzalloc(Size, GFP_ATOMIC);
Mithlesh Thukral54aed112009-01-19 20:27:17 +05303469 pBuffer = (dma_addr_t)NULL;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303470 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003471 if (Buffer == NULL) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303472 /*
3473		 * Decrement the pending allocation count.
3474		 * Pause processing relies on this.
3475 */
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303476 atomic_dec(&adapter->pending_allocations);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003477 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1",
3478 adapter, Size, BufferType, 0);
3479 return (STATUS_RESOURCES);
3480 }
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303481 status = sxg_allocate_complete(adapter, Buffer, pBuffer, Size, BufferType);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003482
3483 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocMem",
3484 adapter, Size, BufferType, status);
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303485 return status;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003486}
3487
3488/*
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303489 * sxg_allocate_rcvblock_complete - Complete a receive descriptor
3490 * block allocation
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003491 *
3492 * Arguments -
3493 * adapter - A pointer to our adapter structure
3494 * RcvBlock - receive block virtual address
3495 * PhysicalAddress - Physical address
3496 * Length - Memory length
3497 *
3498 * Return - int (STATUS_SUCCESS or STATUS_RESOURCES)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003499 */
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303500static int sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
J.R. Mauro5c7514e2008-10-05 20:38:52 -04003501 void *RcvBlock,
3502 dma_addr_t PhysicalAddress,
3503 u32 Length)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003504{
3505 u32 i;
3506 u32 BufferSize = adapter->ReceiveBufferSize;
3507 u64 Paddr;
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303508 void *temp_RcvBlock;
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303509 struct sxg_rcv_block_hdr *RcvBlockHdr;
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303510 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
3511 struct sxg_rcv_descriptor_block *RcvDescriptorBlock;
3512 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003513
3514 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlRcvBlk",
3515 adapter, RcvBlock, Length, 0);
3516 if (RcvBlock == NULL) {
3517 goto fail;
3518 }
3519 memset(RcvBlock, 0, Length);
3520 ASSERT((BufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
3521 (BufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303522 ASSERT(Length == SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE));
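	/*
	 * Layout sketch, as implied by the offsets used below: the block
	 * starts with SXG_RCV_DESCRIPTORS_PER_BLOCK receive data buffer
	 * headers (each SXG_RCV_DATA_HDR_SIZE bytes), while the descriptor
	 * block, descriptor block header and receive block header live at
	 * the offsets given by the corresponding SXG_RCV_*_OFFSET macros.
	 */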
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303523 /*
3524 * First, initialize the contained pool of receive data buffers.
3525 * This initialization requires NBL/NB/MDL allocations, if any of them
3526 * fail, free the block and return without queueing the shared memory
3527 */
3529 temp_RcvBlock = RcvBlock;
3530 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3531 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3532 RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *)
3533 temp_RcvBlock;
3534 /* For FREE macro assertion */
3535 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
3536 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize);
3537 if (RcvDataBufferHdr->SxgDumbRcvPacket == NULL)
3538 goto fail;
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303539
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303540 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003541
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303542 /*
3543 * Place this entire block of memory on the AllRcvBlocks queue so it
3544 	 * can be freed later
3545 */
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303546
3547 RcvBlockHdr = (struct sxg_rcv_block_hdr *) ((unsigned char *)RcvBlock +
3548 SXG_RCV_BLOCK_HDR_OFFSET(SXG_RCV_DATA_HDR_SIZE));
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003549 RcvBlockHdr->VirtualAddress = RcvBlock;
3550 RcvBlockHdr->PhysicalAddress = PhysicalAddress;
3551 spin_lock(&adapter->RcvQLock);
3552 adapter->AllRcvBlockCount++;
3553 InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList);
3554 spin_unlock(&adapter->RcvQLock);
3555
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303556 /* Now free the contained receive data buffers that we
3557 * initialized above */
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303558 temp_RcvBlock = RcvBlock;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003559 for (i = 0, Paddr = PhysicalAddress;
3560 i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303561 i++, Paddr += SXG_RCV_DATA_HDR_SIZE,
3562 temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3563 RcvDataBufferHdr =
3564 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003565 spin_lock(&adapter->RcvQLock);
3566 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
3567 spin_unlock(&adapter->RcvQLock);
3568 }
3569
J.R. Maurob243c4a2008-10-20 19:28:58 -04003570 /* Locate the descriptor block and put it on a separate free queue */
J.R. Mauro5c7514e2008-10-05 20:38:52 -04003571 RcvDescriptorBlock =
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303572 (struct sxg_rcv_descriptor_block *) ((unsigned char *)RcvBlock +
J.R. Mauro5c7514e2008-10-05 20:38:52 -04003573 SXG_RCV_DESCRIPTOR_BLOCK_OFFSET
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303574 (SXG_RCV_DATA_HDR_SIZE));
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003575 RcvDescriptorBlockHdr =
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303576 (struct sxg_rcv_descriptor_block_hdr *) ((unsigned char *)RcvBlock +
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003577 SXG_RCV_DESCRIPTOR_BLOCK_HDR_OFFSET
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303578 (SXG_RCV_DATA_HDR_SIZE));
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003579 RcvDescriptorBlockHdr->VirtualAddress = RcvDescriptorBlock;
3580 RcvDescriptorBlockHdr->PhysicalAddress = Paddr;
3581 spin_lock(&adapter->RcvQLock);
3582 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, RcvDescriptorBlockHdr);
3583 spin_unlock(&adapter->RcvQLock);
3584 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlRBlk",
3585 adapter, RcvBlock, Length, 0);
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303586 return STATUS_SUCCESS;
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303587fail:
J.R. Maurob243c4a2008-10-20 19:28:58 -04003588 /* Free any allocated resources */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003589 if (RcvBlock) {
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303590 temp_RcvBlock = RcvBlock;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003591 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303592 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003593 RcvDataBufferHdr =
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303594 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003595 SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
3596 }
3597 pci_free_consistent(adapter->pcidev,
3598 Length, RcvBlock, PhysicalAddress);
3599 }
Harvey Harrisone88bd232008-10-17 14:46:10 -07003600 DBG_ERROR("%s: OUT OF RESOURCES\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003601 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "RcvAFail",
3602 adapter, adapter->FreeRcvBufferCount,
3603 adapter->FreeRcvBlockCount, adapter->AllRcvBlockCount);
3604 adapter->Stats.NoMem++;
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303605 	/*
	 * XXXTODO: the allocation failed, so all previously allocated
	 * blocks should be freed here (sxg_free_rcvblocks(adapter)).
	 */
3607
3608 return STATUS_RESOURCES;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003609}
3610
3611/*
3612 * sxg_allocate_sgl_buffer_complete - Complete a SGL buffer allocation
3613 *
3614 * Arguments -
3615 * adapter - A pointer to our adapter structure
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303616 * SxgSgl - struct sxg_scatter_gather buffer
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003617 * PhysicalAddress - Physical address
3618 * Length - Memory length
3619 *
3620 * Return - None
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003621 */
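/*
 * Note: this routine both records the new SGL buffer on the adapter-wide
 * AllSglBuffers list and immediately releases it to the free pool via
 * SXG_FREE_SGL_BUFFER().  The irqsave lock variants are used only when we
 * are not already in interrupt context.
 */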
J.R. Mauro73b07062008-10-28 18:42:02 -04003622static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303623 struct sxg_scatter_gather *SxgSgl,
J.R. Mauro5c7514e2008-10-05 20:38:52 -04003624 dma_addr_t PhysicalAddress,
3625 u32 Length)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003626{
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303627 unsigned long sgl_flags;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003628 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlSglCmp",
3629 adapter, SxgSgl, Length, 0);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303630 	if (!in_irq())
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303631 spin_lock_irqsave(&adapter->SglQLock, sgl_flags);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303632 else
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303633 spin_lock(&adapter->SglQLock);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003634 adapter->AllSglBufferCount++;
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303636 SxgSgl->PhysicalAddress = PhysicalAddress;
3637 /* Initialize backpointer once */
3638 SxgSgl->adapter = adapter;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003639 InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303640 	if (!in_irq())
3641 spin_unlock_irqrestore(&adapter->SglQLock, sgl_flags);
3642 else
3643 spin_unlock(&adapter->SglQLock);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003644 SxgSgl->State = SXG_BUFFER_BUSY;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303645 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL, in_irq());
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003646 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlSgl",
3647 adapter, SxgSgl, Length, 0);
3648}
3649
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003650
Mithlesh Thukral54aed112009-01-19 20:27:17 +05303651static int sxg_adapter_set_hwaddr(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003652{
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303653 /*
3654 * DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] \
3655 * funct#[%d]\n", __func__, card->config_set,
3656 * adapter->port, adapter->physport, adapter->functionnumber);
3657 *
3658 * sxg_dbg_macaddrs(adapter);
3659 */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303660 /* DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n",
3661 * __FUNCTION__);
3662 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003663
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303664 /* sxg_dbg_macaddrs(adapter); */
3665
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303666 	struct net_device *dev;

	if (!adapter) {
		return -EBUSY;
	}
	dev = adapter->netdev;
	if (!dev) {
		printk(KERN_ERR "sxg: %s: net_device is NULL\n", __func__);
		return -ENODEV;
	}

	DBG_ERROR("%s ENTER (%s)\n", __func__, dev->name);

	if (netif_running(dev)) {
		return -EBUSY;
	}
3680
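	/*
	 * If no current MAC address has been set yet (all zeros), fall back
	 * to adapter->macaddr, then publish the result to the net_device.
	 */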
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003681 if (!(adapter->currmacaddr[0] ||
3682 adapter->currmacaddr[1] ||
3683 adapter->currmacaddr[2] ||
3684 adapter->currmacaddr[3] ||
3685 adapter->currmacaddr[4] || adapter->currmacaddr[5])) {
3686 memcpy(adapter->currmacaddr, adapter->macaddr, 6);
3687 }
3688 if (adapter->netdev) {
3689 memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05303690 memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003691 }
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303692 /* DBG_ERROR ("%s EXIT port %d\n", __func__, adapter->port); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003693 sxg_dbg_macaddrs(adapter);
3694
Mithlesh Thukral54aed112009-01-19 20:27:17 +05303695 return 0;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003696}
3697
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003698#if XXXTODO
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303699static int sxg_mac_set_address(struct net_device *dev, void *ptr)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003700{
J.R. Mauro73b07062008-10-28 18:42:02 -04003701 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003702 struct sockaddr *addr = ptr;
3703
Harvey Harrisone88bd232008-10-17 14:46:10 -07003704 DBG_ERROR("%s ENTER (%s)\n", __func__, adapter->netdev->name);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003705
3706 if (netif_running(dev)) {
3707 return -EBUSY;
3708 }
3709 if (!adapter) {
3710 return -EBUSY;
3711 }
3712 DBG_ERROR("sxg: %s (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07003713 __func__, adapter->netdev->name, adapter->currmacaddr[0],
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003714 adapter->currmacaddr[1], adapter->currmacaddr[2],
3715 adapter->currmacaddr[3], adapter->currmacaddr[4],
3716 adapter->currmacaddr[5]);
3717 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3718 memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len);
3719 DBG_ERROR("sxg: %s (%s) new %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07003720 __func__, adapter->netdev->name, adapter->currmacaddr[0],
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003721 adapter->currmacaddr[1], adapter->currmacaddr[2],
3722 adapter->currmacaddr[3], adapter->currmacaddr[4],
3723 adapter->currmacaddr[5]);
3724
3725 sxg_config_set(adapter, TRUE);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003726 return 0;
3727}
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003728#endif
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003729
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003730/*
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303731 * SXG DRIVER FUNCTIONS (below)
3732 *
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003733 * sxg_initialize_adapter - Initialize adapter
3734 *
3735 * Arguments -
3736 * adapter - A pointer to our adapter structure
3737 *
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303738 * Return - int
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003739 */
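/*
 * Initialization order, as performed below: disable interrupts, program the
 * MTU, the event ring, the per-ISR interrupt status pointers and the
 * transmit/receive ring registers, stock the card with receive buffers,
 * enable IP/TCP receive checksums, and finally bring up the MAC/XAUI link.
 */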
J.R. Mauro73b07062008-10-28 18:42:02 -04003740static int sxg_initialize_adapter(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003741{
3742 u32 RssIds, IsrCount;
3743 u32 i;
3744 int status;
3745
3746 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitAdpt",
3747 adapter, 0, 0, 0);
3748
J.R. Maurob243c4a2008-10-20 19:28:58 -04003749 RssIds = 1; /* XXXTODO SXG_RSS_CPU_COUNT(adapter); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003750 IsrCount = adapter->MsiEnabled ? RssIds : 1;
3751
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303752 /*
3753 * Sanity check SXG_UCODE_REGS structure definition to
3754 * make sure the length is correct
3755 */
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303756 ASSERT(sizeof(struct sxg_ucode_regs) == SXG_REGISTER_SIZE_PER_CPU);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003757
J.R. Maurob243c4a2008-10-20 19:28:58 -04003758 /* Disable interrupts */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003759 SXG_DISABLE_ALL_INTERRUPTS(adapter);
3760
J.R. Maurob243c4a2008-10-20 19:28:58 -04003761 /* Set MTU */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003762 ASSERT((adapter->FrameSize == ETHERMAXFRAME) ||
3763 (adapter->FrameSize == JUMBOMAXFRAME));
3764 WRITE_REG(adapter->UcodeRegs[0].LinkMtu, adapter->FrameSize, TRUE);
3765
J.R. Maurob243c4a2008-10-20 19:28:58 -04003766 /* Set event ring base address and size */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003767 WRITE_REG64(adapter,
3768 adapter->UcodeRegs[0].EventBase, adapter->PEventRings, 0);
3769 WRITE_REG(adapter->UcodeRegs[0].EventSize, EVENT_RING_SIZE, TRUE);
3770
J.R. Maurob243c4a2008-10-20 19:28:58 -04003771 /* Per-ISR initialization */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003772 for (i = 0; i < IsrCount; i++) {
3773 u64 Addr;
J.R. Maurob243c4a2008-10-20 19:28:58 -04003774 /* Set interrupt status pointer */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003775 Addr = adapter->PIsr + (i * sizeof(u32));
3776 WRITE_REG64(adapter, adapter->UcodeRegs[i].Isp, Addr, i);
3777 }
3778
J.R. Maurob243c4a2008-10-20 19:28:58 -04003779 /* XMT ring zero index */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003780 WRITE_REG64(adapter,
3781 adapter->UcodeRegs[0].SPSendIndex,
3782 adapter->PXmtRingZeroIndex, 0);
3783
J.R. Maurob243c4a2008-10-20 19:28:58 -04003784 /* Per-RSS initialization */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003785 for (i = 0; i < RssIds; i++) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04003786 /* Release all event ring entries to the Microcode */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003787 WRITE_REG(adapter->UcodeRegs[i].EventRelease, EVENT_RING_SIZE,
3788 TRUE);
3789 }
3790
J.R. Maurob243c4a2008-10-20 19:28:58 -04003791 /* Transmit ring base and size */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003792 WRITE_REG64(adapter,
3793 adapter->UcodeRegs[0].XmtBase, adapter->PXmtRings, 0);
3794 WRITE_REG(adapter->UcodeRegs[0].XmtSize, SXG_XMT_RING_SIZE, TRUE);
3795
J.R. Maurob243c4a2008-10-20 19:28:58 -04003796 /* Receive ring base and size */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003797 WRITE_REG64(adapter,
3798 adapter->UcodeRegs[0].RcvBase, adapter->PRcvRings, 0);
3799 WRITE_REG(adapter->UcodeRegs[0].RcvSize, SXG_RCV_RING_SIZE, TRUE);
3800
J.R. Maurob243c4a2008-10-20 19:28:58 -04003801 /* Populate the card with receive buffers */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003802 sxg_stock_rcv_buffers(adapter);
3803
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303804 /*
3805 * Initialize checksum offload capabilities. At the moment we always
3806 * enable IP and TCP receive checksums on the card. Depending on the
3807 * checksum configuration specified by the user, we can choose to
3808 * report or ignore the checksum information provided by the card.
3809 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003810 WRITE_REG(adapter->UcodeRegs[0].ReceiveChecksum,
3811 SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED, TRUE);
3812
J.R. Maurob243c4a2008-10-20 19:28:58 -04003813 /* Initialize the MAC, XAUI */
Harvey Harrisone88bd232008-10-17 14:46:10 -07003814 DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003815 status = sxg_initialize_link(adapter);
Harvey Harrisone88bd232008-10-17 14:46:10 -07003816 DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __func__,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003817 status);
3818 if (status != STATUS_SUCCESS) {
3819 return (status);
3820 }
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303821 /*
3822 * Initialize Dead to FALSE.
3823 * SlicCheckForHang or SlicDumpThread will take it from here.
3824 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003825 adapter->Dead = FALSE;
3826 adapter->PingOutstanding = FALSE;
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05303827 adapter->State = SXG_STATE_RUNNING;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003828
3829 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInit",
3830 adapter, 0, 0, 0);
3831 return (STATUS_SUCCESS);
3832}
3833
3834/*
3835 * sxg_fill_descriptor_block - Populate a descriptor block and give it to
3836 * the card. The caller should hold the RcvQLock
3837 *
3838 * Arguments -
3839 * adapter - A pointer to our adapter structure
3840 * RcvDescriptorBlockHdr - Descriptor block to fill
3841 *
3842 * Return
3843 * status
3844 */
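/*
 * In outline: grab a free command slot on receive ring zero, pull
 * SXG_RCV_DESCRIPTORS_PER_BLOCK free data buffers, record each buffer's
 * virtual and physical address in the descriptor block, point the ring
 * command's Sgl field at the block's physical address, and then kick the
 * card via the RcvCmd register.
 */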
J.R. Mauro73b07062008-10-28 18:42:02 -04003845static int sxg_fill_descriptor_block(struct adapter_t *adapter,
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303846 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003847{
3848 u32 i;
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303849 struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo;
3850 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
3851 struct sxg_rcv_descriptor_block *RcvDescriptorBlock;
3852 struct sxg_cmd *RingDescriptorCmd;
3853 struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0];
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003854
3855 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "FilBlk",
3856 adapter, adapter->RcvBuffersOnCard,
3857 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
3858
3859 ASSERT(RcvDescriptorBlockHdr);
3860
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303861 /*
3862 * If we don't have the resources to fill the descriptor block,
3863 * return failure
3864 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003865 if ((adapter->FreeRcvBufferCount < SXG_RCV_DESCRIPTORS_PER_BLOCK) ||
3866 SXG_RING_FULL(RcvRingInfo)) {
3867 adapter->Stats.NoMem++;
3868 return (STATUS_FAILURE);
3869 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04003870 /* Get a ring descriptor command */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003871 SXG_GET_CMD(RingZero,
3872 RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr);
3873 ASSERT(RingDescriptorCmd);
3874 RcvDescriptorBlockHdr->State = SXG_BUFFER_ONCARD;
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303875 RcvDescriptorBlock = (struct sxg_rcv_descriptor_block *)
3876 RcvDescriptorBlockHdr->VirtualAddress;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003877
J.R. Maurob243c4a2008-10-20 19:28:58 -04003878 /* Fill in the descriptor block */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003879 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) {
3880 SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
3881 ASSERT(RcvDataBufferHdr);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303883 if (!RcvDataBufferHdr->SxgDumbRcvPacket) {
3884 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr,
3885 adapter->ReceiveBufferSize);
3886 			if (RcvDataBufferHdr->skb)
3887 RcvDataBufferHdr->SxgDumbRcvPacket =
3888 RcvDataBufferHdr->skb;
3889 else
3890 goto no_memory;
3891 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003892 SXG_REINIATIALIZE_PACKET(RcvDataBufferHdr->SxgDumbRcvPacket);
3893 RcvDataBufferHdr->State = SXG_BUFFER_ONCARD;
J.R. Mauro5c7514e2008-10-05 20:38:52 -04003894 RcvDescriptorBlock->Descriptors[i].VirtualAddress =
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303895 (void *)RcvDataBufferHdr;
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05303896
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003897 RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
3898 RcvDataBufferHdr->PhysicalAddress;
3899 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04003900 /* Add the descriptor block to receive descriptor ring 0 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003901 RingDescriptorCmd->Sgl = RcvDescriptorBlockHdr->PhysicalAddress;
3902
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303903 /*
3904 * RcvBuffersOnCard is not protected via the receive lock (see
3905 	 * sxg_process_event_queue). We don't want to grab a lock every time a
3906 * buffer is returned to us, so we use atomic interlocked functions
3907 * instead.
3908 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003909 adapter->RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK;
3910
3911 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DscBlk",
3912 RcvDescriptorBlockHdr,
3913 RingDescriptorCmd, RcvRingInfo->Head, RcvRingInfo->Tail);
3914
3915 WRITE_REG(adapter->UcodeRegs[0].RcvCmd, 1, true);
3916 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlk",
3917 adapter, adapter->RcvBuffersOnCard,
3918 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
3919 return (STATUS_SUCCESS);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303920no_memory:
3921 return (-ENOMEM);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003922}
3923
3924/*
3925 * sxg_stock_rcv_buffers - Stock the card with receive buffers
3926 *
3927 * Arguments -
3928 * adapter - A pointer to our adapter structure
3929 *
3930 * Return
3931 * None
3932 */
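/*
 * This runs in two stages: first, if we are below the free-buffer threshold,
 * ask sxg_allocate_buffer_memory() for another receive block; second, move
 * free descriptor blocks onto the card (via sxg_fill_descriptor_block())
 * until SXG_RCV_DATA_BUFFERS buffers are on the card or we run out.
 */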
J.R. Mauro73b07062008-10-28 18:42:02 -04003933static void sxg_stock_rcv_buffers(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003934{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303935 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003936
3937 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf",
3938 adapter, adapter->RcvBuffersOnCard,
3939 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303940 /*
3941 	 * First, if we have fewer than our minimum threshold of receive
3942 	 * buffers, there is no allocation in progress, and we haven't
3943 	 * exceeded our maximum block count, get another block of buffers.
3944 	 * None of this needs to be SMP safe; these are round numbers.
3945 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003946 if ((adapter->FreeRcvBufferCount < SXG_MIN_RCV_DATA_BUFFERS) &&
3947 (adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) &&
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303948 (atomic_read(&adapter->pending_allocations) == 0)) {
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003949 sxg_allocate_buffer_memory(adapter,
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303950 SXG_RCV_BLOCK_SIZE
3951 (SXG_RCV_DATA_HDR_SIZE),
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003952 SXG_BUFFER_TYPE_RCV);
3953 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04003954 /* Now grab the RcvQLock lock and proceed */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003955 spin_lock(&adapter->RcvQLock);
3956 while (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303957 struct list_entry *_ple;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003958
J.R. Maurob243c4a2008-10-20 19:28:58 -04003959 /* Get a descriptor block */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003960 RcvDescriptorBlockHdr = NULL;
3961 if (adapter->FreeRcvBlockCount) {
3962 _ple = RemoveHeadList(&adapter->FreeRcvBlocks);
J.R. Mauro5c7514e2008-10-05 20:38:52 -04003963 RcvDescriptorBlockHdr =
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303964 container_of(_ple, struct sxg_rcv_descriptor_block_hdr,
J.R. Mauro5c7514e2008-10-05 20:38:52 -04003965 FreeList);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003966 adapter->FreeRcvBlockCount--;
3967 RcvDescriptorBlockHdr->State = SXG_BUFFER_BUSY;
3968 }
3969
3970 if (RcvDescriptorBlockHdr == NULL) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04003971 /* Bail out.. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003972 adapter->Stats.NoMem++;
3973 break;
3974 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04003975 /* Fill in the descriptor block and give it to the card */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003976 if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
3977 STATUS_FAILURE) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04003978 /* Free the descriptor block */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003979 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
3980 RcvDescriptorBlockHdr);
3981 break;
3982 }
3983 }
3984 spin_unlock(&adapter->RcvQLock);
3985 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlks",
3986 adapter, adapter->RcvBuffersOnCard,
3987 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
3988}
3989
3990/*
3991 * sxg_complete_descriptor_blocks - Return descriptor blocks that have been
3992 * completed by the microcode
3993 *
3994 * Arguments -
3995 * adapter - A pointer to our adapter structure
3996 * Index - Where the microcode is up to
3997 *
3998 * Return
3999 * None
4000 */
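/*
 * Walks receive ring zero from our Tail toward Index (the microcode's
 * position), returning each completed ring command and refilling its
 * descriptor block.  Processing stops once the forward distance between
 * Index and the Tail drops to three entries or fewer, presumably to keep a
 * small guard band between the driver and the microcode.
 */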
J.R. Mauro73b07062008-10-28 18:42:02 -04004001static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
J.R. Mauro5c7514e2008-10-05 20:38:52 -04004002 unsigned char Index)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004003{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05304004 struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0];
4005 struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo;
4006 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
4007 struct sxg_cmd *RingDescriptorCmd;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004008
4009 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlks",
4010 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
4011
J.R. Maurob243c4a2008-10-20 19:28:58 -04004012 /* Now grab the RcvQLock lock and proceed */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004013 spin_lock(&adapter->RcvQLock);
4014 ASSERT(Index != RcvRingInfo->Tail);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05304015 while (sxg_ring_get_forward_diff(RcvRingInfo, Index,
4016 RcvRingInfo->Tail) > 3) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05304017 /*
4018 * Locate the current Cmd (ring descriptor entry), and
4019 * associated receive descriptor block, and advance
4020 * the tail
4021 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004022 SXG_RETURN_CMD(RingZero,
4023 RcvRingInfo,
4024 RingDescriptorCmd, RcvDescriptorBlockHdr);
4025 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlk",
4026 RcvRingInfo->Head, RcvRingInfo->Tail,
4027 RingDescriptorCmd, RcvDescriptorBlockHdr);
4028
J.R. Maurob243c4a2008-10-20 19:28:58 -04004029 /* Clear the SGL field */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004030 RingDescriptorCmd->Sgl = 0;
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05304031 /*
4032 * Attempt to refill it and hand it right back to the
4033 * card. If we fail to refill it, free the descriptor block
4034 * header. The card will be restocked later via the
4035 * RcvBuffersOnCard test
4036 */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05304037 if (sxg_fill_descriptor_block(adapter,
4038 RcvDescriptorBlockHdr) == STATUS_FAILURE)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004039 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
4040 RcvDescriptorBlockHdr);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004041 }
4042 spin_unlock(&adapter->RcvQLock);
4043 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XCRBlks",
4044 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
4045}
4046
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05304047/*
4048 * Read the statistics which the card has been maintaining.
4049 */
4050void sxg_collect_statistics(struct adapter_t *adapter)
4051{
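	/*
	 * Writing the DMA address of our statistics buffer to the
	 * GetUcodeStats register appears to ask the card to deposit its
	 * current counters into adapter->ucode_stats, which are then
	 * folded into the standard net_device statistics below.
	 */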
4052 	if (adapter->ucode_stats) {
Mithlesh Thukral54aed112009-01-19 20:27:17 +05304053 		WRITE_REG64(adapter, adapter->UcodeRegs[0].GetUcodeStats,
4054 				adapter->pucode_stats, 0);
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05304055 		adapter->stats.rx_fifo_errors = adapter->ucode_stats->ERDrops;
4056 		adapter->stats.rx_over_errors = adapter->ucode_stats->NBDrops;
4057 		adapter->stats.tx_fifo_errors = adapter->ucode_stats->XDrops;
	}
4058}
4059
4060static struct net_device_stats *sxg_get_stats(struct net_device * dev)
4061{
4062 struct adapter_t *adapter = netdev_priv(dev);
4063
4064 sxg_collect_statistics(adapter);
4065 return (&adapter->stats);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05304066}
4067
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004068static struct pci_driver sxg_driver = {
Mithlesh Thukral371d7a92009-01-19 20:22:34 +05304069 .name = sxg_driver_name,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004070 .id_table = sxg_pci_tbl,
4071 .probe = sxg_entry_probe,
4072 .remove = sxg_entry_remove,
4073#if SXG_POWER_MANAGEMENT_ENABLED
4074 .suspend = sxgpm_suspend,
4075 .resume = sxgpm_resume,
4076#endif
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05304077 /* .shutdown = slic_shutdown, MOOK_INVESTIGATE */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004078};
4079
4080static int __init sxg_module_init(void)
4081{
4082 sxg_init_driver();
4083
4084 if (debug >= 0)
4085 sxg_debug = debug;
4086
4087 return pci_register_driver(&sxg_driver);
4088}
4089
4090static void __exit sxg_module_cleanup(void)
4091{
4092 pci_unregister_driver(&sxg_driver);
4093}
4094
4095module_init(sxg_module_init);
4096module_exit(sxg_module_cleanup);