/**************************************************************************
 *
 * Copyright (C) 2000-2008 Alacritech, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of Alacritech, Inc.
 *
 * Parts developed by LinSysSoft Sahara team
 *
 **************************************************************************/

/*
 * FILENAME: sxg.c
 *
 * The SXG driver for Alacritech's 10Gbe products.
 *
 * NOTE: This is the standard, non-accelerated version of Alacritech's
 *       IS-NIC driver.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>

#define SLIC_GET_STATS_ENABLED		0
#define LINUX_FREES_ADAPTER_RESOURCES	1
#define SXG_OFFLOAD_IP_CHECKSUM		0
#define SXG_POWER_MANAGEMENT_ENABLED	0
#define VPCI				0
#define ATK_DEBUG			1

#include "sxg_os.h"
#include "sxghw.h"
#include "sxghif.h"
#include "sxg.h"
#include "sxgdbg.h"

#include "sxgphycode.h"
#define SXG_UCODE_DBG 0		/* Turn on for debugging */
#if SXG_UCODE_DBG
#include "saharadbgdownload.c"
#include "saharadbgdownloadB.c"
#else
#include "saharadownload.c"
#include "saharadownloadB.c"
#endif

static int sxg_allocate_buffer_memory(struct adapter_t *adapter, u32 Size,
				      enum sxg_buffer_type BufferType);
static int sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
					  void *RcvBlock,
					  dma_addr_t PhysicalAddress,
					  u32 Length);
static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
					     struct sxg_scatter_gather *SxgSgl,
					     dma_addr_t PhysicalAddress,
					     u32 Length);

static void sxg_mcast_init_crc32(void);
static int sxg_entry_open(struct net_device *dev);
static int sxg_second_open(struct net_device *dev);
static int sxg_entry_halt(struct net_device *dev);
static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev);
static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb);
static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
			struct sxg_scatter_gather *SxgSgl);

static void sxg_handle_interrupt(struct adapter_t *adapter, int *work_done,
				 int budget);
static void sxg_interrupt(struct adapter_t *adapter);
static int sxg_poll(struct napi_struct *napi, int budget);
static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId);
static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId,
				   int *sxg_napi_continue, int *work_done,
				   int budget);
static void sxg_complete_slow_send(struct adapter_t *adapter);
static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
					struct sxg_event *Event);
static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus);
static bool sxg_mac_filter(struct adapter_t *adapter,
			   struct ether_header *EtherHdr, ushort length);
static struct net_device_stats *sxg_get_stats(struct net_device *dev);
void sxg_free_resources(struct adapter_t *adapter);
void sxg_free_rcvblocks(struct adapter_t *adapter);
void sxg_free_sgl_buffers(struct adapter_t *adapter);
void sxg_unmap_resources(struct adapter_t *adapter);
void sxg_free_mcast_addrs(struct adapter_t *adapter);
void sxg_collect_statistics(struct adapter_t *adapter);
static int sxg_register_interrupt(struct adapter_t *adapter);
static void sxg_remove_isr(struct adapter_t *adapter);
static irqreturn_t sxg_isr(int irq, void *dev_id);

#define XXXTODO 0

#if XXXTODO
static int sxg_mac_set_address(struct net_device *dev, void *ptr);
#endif
static void sxg_mcast_set_list(struct net_device *dev);

static int sxg_adapter_set_hwaddr(struct adapter_t *adapter);

static int sxg_initialize_adapter(struct adapter_t *adapter);
static void sxg_stock_rcv_buffers(struct adapter_t *adapter);
static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
					   unsigned char Index);
int sxg_change_mtu(struct net_device *netdev, int new_mtu);
static int sxg_initialize_link(struct adapter_t *adapter);
static int sxg_phy_init(struct adapter_t *adapter);
static void sxg_link_event(struct adapter_t *adapter);
static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter);
static void sxg_link_state(struct adapter_t *adapter,
			   enum SXG_LINK_STATE LinkState);
static int sxg_write_mdio_reg(struct adapter_t *adapter,
			      u32 DevAddr, u32 RegAddr, u32 Value);
static int sxg_read_mdio_reg(struct adapter_t *adapter,
			     u32 DevAddr, u32 RegAddr, u32 *pValue);
static void sxg_set_mcast_addr(struct adapter_t *adapter);

static unsigned int sxg_first_init = 1;
static char *sxg_banner =
    "Alacritech SLIC Technology(tm) Server and Storage "
    "10Gbe Accelerator (Non-Accelerated)\n";

static int sxg_debug = 1;
static int debug = -1;
static struct net_device *head_netdevice = NULL;

static struct sxgbase_driver sxg_global = {
	.dynamic_intagg = 1,
};
static int intagg_delay = 100;
static u32 dynamic_intagg = 0;

char sxg_driver_name[] = "sxg_nic";
#define DRV_AUTHOR	"Alacritech, Inc. Engineering"
#define DRV_DESCRIPTION	\
	"Alacritech SLIC Technology(tm) Non-Accelerated 10Gbe Driver"
#define DRV_COPYRIGHT	\
	"Copyright 2000-2008 Alacritech, Inc. All rights reserved."

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");

module_param(dynamic_intagg, int, 0);
MODULE_PARM_DESC(dynamic_intagg, "Dynamic Interrupt Aggregation Setting");
module_param(intagg_delay, int, 0);
MODULE_PARM_DESC(intagg_delay, "uSec Interrupt Aggregation Delay");

static struct pci_device_id sxg_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(SXG_VENDOR_ID, SXG_DEVICE_ID)},
	{0,}
};

MODULE_DEVICE_TABLE(pci, sxg_pci_tbl);

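/*
 * sxg_reg32_write - Write a 32-bit value to an adapter register and,
 * if requested, follow it with a memory barrier to flush the write.
 */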
static inline void sxg_reg32_write(void __iomem *reg, u32 value, bool flush)
{
	writel(value, reg);
	if (flush)
		mb();
}

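/*
 * sxg_reg64_write - Write a 64-bit value as two 32-bit register writes.
 * The upper half goes to the per-CPU Upper register and the lower half
 * to 'reg', under Bit64RegLock so the pair cannot be interleaved with
 * another 64-bit write.
 */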
static inline void sxg_reg64_write(struct adapter_t *adapter, void __iomem *reg,
				   u64 value, u32 cpu)
{
	u32 value_high = (u32) (value >> 32);
	u32 value_low = (u32) (value & 0x00000000FFFFFFFF);
	unsigned long flags;

	spin_lock_irqsave(&adapter->Bit64RegLock, flags);
	writel(value_high, (void __iomem *)(&adapter->UcodeRegs[cpu].Upper));
	writel(value_low, reg);
	spin_unlock_irqrestore(&adapter->Bit64RegLock, flags);
}

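/*
 * sxg_init_driver - One-time driver initialization; initializes the
 * global driver lock on the first call.
 */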
static void sxg_init_driver(void)
{
	if (sxg_first_init) {
		DBG_ERROR("sxg: %s sxg_first_init set jiffies[%lx]\n",
			  __func__, jiffies);
		sxg_first_init = 0;
		spin_lock_init(&sxg_global.driver_lock);
	}
}

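/*
 * sxg_dbg_macaddrs - Dump the adapter's current and configured MAC
 * addresses to the debug log.
 */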
static void sxg_dbg_macaddrs(struct adapter_t *adapter)
{
	DBG_ERROR(" (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
		  adapter->netdev->name, adapter->currmacaddr[0],
		  adapter->currmacaddr[1], adapter->currmacaddr[2],
		  adapter->currmacaddr[3], adapter->currmacaddr[4],
		  adapter->currmacaddr[5]);
	DBG_ERROR(" (%s) mac  %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
		  adapter->netdev->name, adapter->macaddr[0],
		  adapter->macaddr[1], adapter->macaddr[2],
		  adapter->macaddr[3], adapter->macaddr[4],
		  adapter->macaddr[5]);
	return;
}

/* SXG Globals */
static struct sxg_driver SxgDriver;

#ifdef ATKDBG
static struct sxg_trace_buffer LSxgTraceBuffer;
#endif /* ATKDBG */
static struct sxg_trace_buffer *SxgTraceBuffer = NULL;

/*
 * MSI-related APIs
 */
int sxg_register_intr(struct adapter_t *adapter);
int sxg_enable_msi_x(struct adapter_t *adapter);
int sxg_add_msi_isr(struct adapter_t *adapter);
void sxg_remove_msix_isr(struct adapter_t *adapter);
int sxg_set_interrupt_capability(struct adapter_t *adapter);

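/*
 * sxg_set_interrupt_capability - Try to enable MSI-X for the adapter and
 * record the result in adapter->msi_enabled.
 */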
int sxg_set_interrupt_capability(struct adapter_t *adapter)
{
	int ret;

	ret = sxg_enable_msi_x(adapter);
	if (ret != STATUS_SUCCESS) {
		adapter->msi_enabled = FALSE;
		DBG_ERROR("sxg_set_interrupt_capability MSI-X Disable\n");
	} else {
		adapter->msi_enabled = TRUE;
		DBG_ERROR("sxg_set_interrupt_capability MSI-X Enable\n");
	}
	return ret;
}

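/*
 * sxg_register_intr - Register the interrupt handler, using the MSI-X
 * vectors when they were enabled and falling back to the legacy
 * line-based (pin) interrupt otherwise.
 */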
int sxg_register_intr(struct adapter_t *adapter)
{
	int ret = 0;

	if (adapter->msi_enabled) {
		ret = sxg_add_msi_isr(adapter);
	} else {
		DBG_ERROR("MSI-X Enable Failed. Using Pin INT\n");
		ret = sxg_register_interrupt(adapter);
		if (ret != STATUS_SUCCESS) {
			DBG_ERROR("sxg_register_interrupt Failed\n");
		}
	}
	return ret;
}

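/*
 * sxg_enable_msi_x - Allocate the MSI-X entry table (currently a single
 * vector) and ask the PCI layer to enable MSI-X for the device.
 */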
int sxg_enable_msi_x(struct adapter_t *adapter)
{
	int ret;

	adapter->nr_msix_entries = 1;
	adapter->msi_entries = kmalloc(adapter->nr_msix_entries *
				       sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msi_entries) {
		DBG_ERROR("%s:MSI Entries memory allocation Failed\n",
			  __func__);
		return -ENOMEM;
	}
	memset(adapter->msi_entries, 0, adapter->nr_msix_entries *
	       sizeof(struct msix_entry));

	ret = pci_enable_msix(adapter->pcidev, adapter->msi_entries,
			      adapter->nr_msix_entries);
	if (ret) {
		DBG_ERROR("Enabling MSI-X with %d vectors failed\n",
			  adapter->nr_msix_entries);
		/* Should retry with the smaller vector count returned. */
		kfree(adapter->msi_entries);
		return STATUS_FAILURE;	/* MSI-X enable failed */
	}
	return (STATUS_SUCCESS);
}

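/*
 * sxg_add_msi_isr - request_irq() each enabled MSI-X vector with sxg_isr
 * as the handler and mark the interrupt as registered.
 */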
int sxg_add_msi_isr(struct adapter_t *adapter)
{
	int ret, i;

	if (!adapter->intrregistered) {
		for (i = 0; i < adapter->nr_msix_entries; i++) {
			ret = request_irq(adapter->msi_entries[i].vector,
					  sxg_isr,
					  IRQF_SHARED,
					  adapter->netdev->name,
					  adapter->netdev);
			if (ret) {
				DBG_ERROR("sxg: MSI-X request_irq (%s) "
					  "FAILED [%x]\n",
					  adapter->netdev->name, ret);
				return (ret);
			}
		}
	}
	adapter->msi_enabled = TRUE;
	adapter->intrregistered = 1;
	adapter->IntRegistered = TRUE;
	return (STATUS_SUCCESS);
}

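/*
 * sxg_remove_msix_isr - Free every IRQ that was requested for the MSI-X
 * vectors.
 */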
void sxg_remove_msix_isr(struct adapter_t *adapter)
{
	int i, vector;
	struct net_device *netdev = adapter->netdev;

	for (i = 0; i < adapter->nr_msix_entries; i++) {
		vector = adapter->msi_entries[i].vector;
		DBG_ERROR("%s : Freeing IRQ vector#%d\n", __func__, vector);
		free_irq(vector, netdev);
	}
}

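/*
 * sxg_remove_isr - Undo interrupt registration: release either the MSI-X
 * vectors or the legacy line-based interrupt.
 */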
static void sxg_remove_isr(struct adapter_t *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->msi_enabled)
		sxg_remove_msix_isr(adapter);
	else
		free_irq(adapter->netdev->irq, netdev);
}

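/*
 * sxg_reset_interrupt_capability - Disable MSI-X and free the entry table
 * allocated by sxg_enable_msi_x().
 */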
void sxg_reset_interrupt_capability(struct adapter_t *adapter)
{
	if (adapter->msi_enabled) {
		pci_disable_msix(adapter->pcidev);
		kfree(adapter->msi_entries);
		adapter->msi_entries = NULL;
	}
	return;
}

/*
 * sxg_download_microcode
 *
 * Download microcode to the Sahara adapter
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	UcodeSel	- microcode file selection
 *
 * Return -
 *	TRUE on success, FALSE on failure
 */
static bool sxg_download_microcode(struct adapter_t *adapter,
				   enum SXG_UCODE_SEL UcodeSel)
{
	struct sxg_hw_regs *HwRegs = adapter->HwRegs;
	u32 Section;
	u32 ThisSectionSize;
	u32 *Instruction = NULL;
	u32 BaseAddress, AddressOffset, Address;
	/* u32 Failure; */
	u32 ValueRead;
	u32 i;
	u32 numSections = 0;
	u32 sectionSize[16];
	u32 sectionStart[16];

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DnldUcod",
		  adapter, 0, 0, 0);
	DBG_ERROR("sxg: %s ENTER\n", __func__);

	switch (UcodeSel) {
	case SXG_UCODE_SAHARA:	/* Sahara operational ucode */
		numSections = SNumSections;
		for (i = 0; i < numSections; i++) {
			sectionSize[i] = SSectionSize[i];
			sectionStart[i] = SSectionStart[i];
		}
		break;
	default:
		printk(KERN_ERR KBUILD_MODNAME
		       ": Woah, big error with the microcode!\n");
		break;
	}

	DBG_ERROR("sxg: RESET THE CARD\n");
	/* First, reset the card */
	WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);

	/*
	 * Download each section of the microcode as specified in
	 * its download file.  The *download.c file is generated using
	 * the saharaobjtoc facility, which converts the metastep .obj
	 * file to a .c file containing a two-dimensional array.
	 */
	for (Section = 0; Section < numSections; Section++) {
		DBG_ERROR("sxg: SECTION # %d\n", Section);
		switch (UcodeSel) {
		case SXG_UCODE_SAHARA:
			Instruction = (u32 *) &SaharaUCode[Section][0];
			break;
		default:
			ASSERT(0);
			break;
		}
		BaseAddress = sectionStart[Section];
		/* Size in instructions */
		ThisSectionSize = sectionSize[Section] / 12;
		for (AddressOffset = 0; AddressOffset < ThisSectionSize;
		     AddressOffset++) {
			Address = BaseAddress + AddressOffset;
			ASSERT((Address & ~MICROCODE_ADDRESS_MASK) == 0);
			/* Write instruction bits 31 - 0 */
			WRITE_REG(HwRegs->UcodeDataLow, *Instruction, FLUSH);
			/* Write instruction bits 63-32 */
			WRITE_REG(HwRegs->UcodeDataMiddle, *(Instruction + 1),
				  FLUSH);
			/* Write instruction bits 95-64 */
			WRITE_REG(HwRegs->UcodeDataHigh, *(Instruction + 2),
				  FLUSH);
			/* Write instruction address with the WRITE bit set */
			WRITE_REG(HwRegs->UcodeAddr,
				  (Address | MICROCODE_ADDRESS_WRITE), FLUSH);
			/*
			 * Sahara bug in the ucode download logic - the write to
			 * DataLow for the next instruction could get corrupted.
			 * To avoid this, write to DataLow again for this
			 * instruction (which may get corrupted, but it doesn't
			 * matter), then increment the address and write the
			 * data for the next instruction to DataLow.  That
			 * write should succeed.
			 */
			WRITE_REG(HwRegs->UcodeDataLow, *Instruction, TRUE);
			/* Advance 3 u32s to start of next instruction */
			Instruction += 3;
		}
	}
	/*
	 * Now repeat the entire operation reading the instruction back and
	 * checking for parity errors
	 */
	for (Section = 0; Section < numSections; Section++) {
		DBG_ERROR("sxg: check SECTION # %d\n", Section);
		switch (UcodeSel) {
		case SXG_UCODE_SAHARA:
			Instruction = (u32 *) &SaharaUCode[Section][0];
			break;
		default:
			ASSERT(0);
			break;
		}
		BaseAddress = sectionStart[Section];
		/* Size in instructions */
		ThisSectionSize = sectionSize[Section] / 12;
		for (AddressOffset = 0; AddressOffset < ThisSectionSize;
		     AddressOffset++) {
			Address = BaseAddress + AddressOffset;
			/* Write the address with the READ bit set */
			WRITE_REG(HwRegs->UcodeAddr,
				  (Address | MICROCODE_ADDRESS_READ), FLUSH);
			/* Read it back and check parity bit. */
			READ_REG(HwRegs->UcodeAddr, ValueRead);
			if (ValueRead & MICROCODE_ADDRESS_PARITY) {
				DBG_ERROR("sxg: %s PARITY ERROR\n",
					  __func__);

				return FALSE;	/* Parity error */
			}
			ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address);
			/* Read the instruction back and compare */
			READ_REG(HwRegs->UcodeDataLow, ValueRead);
			if (ValueRead != *Instruction) {
				DBG_ERROR("sxg: %s MISCOMPARE LOW\n",
					  __func__);
				return FALSE;	/* Miscompare */
			}
			READ_REG(HwRegs->UcodeDataMiddle, ValueRead);
			if (ValueRead != *(Instruction + 1)) {
				DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n",
					  __func__);
				return FALSE;	/* Miscompare */
			}
			READ_REG(HwRegs->UcodeDataHigh, ValueRead);
			if (ValueRead != *(Instruction + 2)) {
				DBG_ERROR("sxg: %s MISCOMPARE HIGH\n",
					  __func__);
				return FALSE;	/* Miscompare */
			}
			/* Advance 3 u32s to start of next instruction */
			Instruction += 3;
		}
	}

	/* Everything OK, Go. */
	WRITE_REG(HwRegs->UcodeAddr, MICROCODE_ADDRESS_GO, FLUSH);

	/*
	 * Poll the CardUp register to wait for microcode to initialize.
	 * Give up after 10,000 attempts (500ms).
	 */
	for (i = 0; i < 10000; i++) {
		udelay(50);
		READ_REG(adapter->UcodeRegs[0].CardUp, ValueRead);
		if (ValueRead == 0xCAFE) {
			DBG_ERROR("sxg: %s BOO YA 0xCAFE\n", __func__);
			break;
		}
	}
	if (i == 10000) {
		DBG_ERROR("sxg: %s TIMEOUT\n", __func__);

		return FALSE;	/* Timeout */
	}
	/*
	 * Now write the LoadSync register.  This is used to
	 * synchronize with the card so it can scribble on the memory
	 * that contained 0xCAFE from the "CardUp" step above
	 */
	if (UcodeSel == SXG_UCODE_SAHARA) {
		WRITE_REG(adapter->UcodeRegs[0].LoadSync, 0, FLUSH);
	}

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDnldUcd",
		  adapter, 0, 0, 0);
	DBG_ERROR("sxg: %s EXIT\n", __func__);

	return (TRUE);
}

/*
 * sxg_allocate_resources - Allocate memory and locks
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *
 * Return - int
 */
static int sxg_allocate_resources(struct adapter_t *adapter)
{
	int status;
	u32 i;
	u32 RssIds, IsrCount;
	/* struct sxg_xmt_ring *XmtRing; */
	/* struct sxg_rcv_ring *RcvRing; */

	DBG_ERROR("%s ENTER\n", __func__);

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocRes",
		  adapter, 0, 0, 0);

	/* Windows tells us how many CPUs it plans to use for RSS */
	RssIds = SXG_RSS_CPU_COUNT(adapter);
	IsrCount = adapter->msi_enabled ? RssIds : 1;

	DBG_ERROR("%s Setup the spinlocks\n", __func__);

	/* Allocate spinlocks and initialize listheads first. */
	spin_lock_init(&adapter->RcvQLock);
	spin_lock_init(&adapter->SglQLock);
	spin_lock_init(&adapter->XmtZeroLock);
	spin_lock_init(&adapter->Bit64RegLock);
	spin_lock_init(&adapter->AdapterLock);
	atomic_set(&adapter->pending_allocations, 0);

	DBG_ERROR("%s Setup the lists\n", __func__);

	InitializeListHead(&adapter->FreeRcvBuffers);
	InitializeListHead(&adapter->FreeRcvBlocks);
	InitializeListHead(&adapter->AllRcvBlocks);
	InitializeListHead(&adapter->FreeSglBuffers);
	InitializeListHead(&adapter->AllSglBuffers);

	/*
	 * Mark these basic allocations done.  This flag essentially
	 * tells the SxgFreeResources routine that it can grab spinlocks
	 * and reference listheads.
	 */
	adapter->BasicAllocations = TRUE;
	/*
	 * Main allocation loop.  Start with the maximum supported by
	 * the microcode and back off if memory allocation
	 * fails.  If we hit a minimum, fail.
	 */

	for (;;) {
		DBG_ERROR("%s Allocate XmtRings size[%x]\n", __func__,
			  (unsigned int)(sizeof(struct sxg_xmt_ring) * 1));

		/*
		 * Start with big items first - receive and transmit rings.
		 * At the moment I'm going to keep the ring size fixed and
		 * adjust the TCBs if we fail.  Later we might
		 * consider reducing the ring size as well..
		 */
		adapter->XmtRings = pci_alloc_consistent(adapter->pcidev,
						sizeof(struct sxg_xmt_ring) * 1,
						&adapter->PXmtRings);
		DBG_ERROR("%s XmtRings[%p]\n", __func__, adapter->XmtRings);

		if (!adapter->XmtRings) {
			goto per_tcb_allocation_failed;
		}
		memset(adapter->XmtRings, 0, sizeof(struct sxg_xmt_ring) * 1);

		DBG_ERROR("%s Allocate RcvRings size[%x]\n", __func__,
			  (unsigned int)(sizeof(struct sxg_rcv_ring) * 1));
		adapter->RcvRings =
		    pci_alloc_consistent(adapter->pcidev,
					 sizeof(struct sxg_rcv_ring) * 1,
					 &adapter->PRcvRings);
		DBG_ERROR("%s RcvRings[%p]\n", __func__, adapter->RcvRings);
		if (!adapter->RcvRings) {
			goto per_tcb_allocation_failed;
		}
		memset(adapter->RcvRings, 0, sizeof(struct sxg_rcv_ring) * 1);
		adapter->ucode_stats = kzalloc(sizeof(struct sxg_ucode_stats),
					       GFP_ATOMIC);
		adapter->pucode_stats = pci_map_single(adapter->pcidev,
					       adapter->ucode_stats,
					       sizeof(struct sxg_ucode_stats),
					       PCI_DMA_FROMDEVICE);
		/* memset(adapter->ucode_stats, 0,
			  sizeof(struct sxg_ucode_stats)); */
		break;

 per_tcb_allocation_failed:
		/* An allocation failed.  Free any successful allocations. */
		if (adapter->XmtRings) {
			pci_free_consistent(adapter->pcidev,
					    sizeof(struct sxg_xmt_ring) * 1,
					    adapter->XmtRings,
					    adapter->PXmtRings);
			adapter->XmtRings = NULL;
		}
		if (adapter->RcvRings) {
			pci_free_consistent(adapter->pcidev,
					    sizeof(struct sxg_rcv_ring) * 1,
					    adapter->RcvRings,
					    adapter->PRcvRings);
			adapter->RcvRings = NULL;
		}
		if (adapter->ucode_stats) {
			pci_unmap_single(adapter->pcidev,
					 sizeof(struct sxg_ucode_stats),
					 adapter->pucode_stats,
					 PCI_DMA_FROMDEVICE);
			adapter->ucode_stats = NULL;
		}
		/* Loop around and try again.... */
	}

	DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __func__);
	/* Initialize rcv zero and xmt zero rings */
	SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
	SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);

	/* Sanity check receive data structure format */
	/* ASSERT((adapter->ReceiveBufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
	   (adapter->ReceiveBufferSize == SXG_RCV_JUMBO_BUFFER_SIZE)); */
	ASSERT(sizeof(struct sxg_rcv_descriptor_block) ==
	       SXG_RCV_DESCRIPTOR_BLOCK_SIZE);

	/*
	 * Allocate receive data buffers.  We allocate a block of buffers and
	 * a corresponding descriptor block at once.  See sxghw.h:SXG_RCV_BLOCK
	 */
	for (i = 0; i < SXG_INITIAL_RCV_DATA_BUFFERS;
	     i += SXG_RCV_DESCRIPTORS_PER_BLOCK) {
		status = sxg_allocate_buffer_memory(adapter,
				SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
				SXG_BUFFER_TYPE_RCV);
		if (status != STATUS_SUCCESS)
			return status;
	}
	/*
	 * NBL resource allocation can fail in the 'AllocateComplete' routine,
	 * which doesn't return status.  Make sure we got the number of buffers
	 * we requested.
	 */
	if (adapter->FreeRcvBufferCount < SXG_INITIAL_RCV_DATA_BUFFERS) {
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
			  adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES,
			  0);
		return (STATUS_RESOURCES);
	}

	DBG_ERROR("%s Allocate EventRings size[%x]\n", __func__,
		  (unsigned int)(sizeof(struct sxg_event_ring) * RssIds));

	/* Allocate event queues. */
	adapter->EventRings = pci_alloc_consistent(adapter->pcidev,
					   sizeof(struct sxg_event_ring) *
					   RssIds,
					   &adapter->PEventRings);

	if (!adapter->EventRings) {
		/*
		 * Caller will call SxgFreeAdapter to clean up above
		 * allocations
		 */
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8",
			  adapter, SXG_MAX_ENTRIES, 0, 0);
		status = STATUS_RESOURCES;
		goto per_tcb_allocation_failed;
	}
	memset(adapter->EventRings, 0, sizeof(struct sxg_event_ring) * RssIds);

	DBG_ERROR("%s Allocate ISR size[%x]\n", __func__, IsrCount);
	/* Allocate ISR */
	adapter->Isr = pci_alloc_consistent(adapter->pcidev,
					    IsrCount, &adapter->PIsr);
	if (!adapter->Isr) {
		/*
		 * Caller will call SxgFreeAdapter to clean up above
		 * allocations
		 */
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9",
			  adapter, SXG_MAX_ENTRIES, 0, 0);
		status = STATUS_RESOURCES;
		goto per_tcb_allocation_failed;
	}
	memset(adapter->Isr, 0, sizeof(u32) * IsrCount);

	DBG_ERROR("%s Allocate shared XMT ring zero index location size[%x]\n",
		  __func__, (unsigned int)sizeof(u32));

	/* Allocate shared XMT ring zero index location */
	adapter->XmtRingZeroIndex = pci_alloc_consistent(adapter->pcidev,
						 sizeof(u32),
						 &adapter->PXmtRingZeroIndex);
	if (!adapter->XmtRingZeroIndex) {
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF10",
			  adapter, SXG_MAX_ENTRIES, 0, 0);
		status = STATUS_RESOURCES;
		goto per_tcb_allocation_failed;
	}
	memset(adapter->XmtRingZeroIndex, 0, sizeof(u32));

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlcResS",
		  adapter, SXG_MAX_ENTRIES, 0, 0);

	return status;
}

/*
 * sxg_config_pci -
 *
 * Set up PCI Configuration space
 *
 * Arguments -
 *	pcidev	- A pointer to the PCI device structure
 */
static void sxg_config_pci(struct pci_dev *pcidev)
{
	u16 pci_command;
	u16 new_command;

	pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
	DBG_ERROR("sxg: %s PCI command[%4.4x]\n", __func__, pci_command);
	/* Set the command register */
	new_command = pci_command | (
				     /* Memory Space Enable */
				     PCI_COMMAND_MEMORY |
				     /* Bus master enable */
				     PCI_COMMAND_MASTER |
				     /* Memory write and invalidate */
				     PCI_COMMAND_INVALIDATE |
				     /* Parity error response */
				     PCI_COMMAND_PARITY |
				     /* System ERR */
				     PCI_COMMAND_SERR |
				     /* Fast back-to-back */
				     PCI_COMMAND_FAST_BACK);
	if (pci_command != new_command) {
		DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n",
			  __func__, pci_command, new_command);
		pci_write_config_word(pcidev, PCI_COMMAND, new_command);
	}
}

/*
 * sxg_read_config
 * @adapter : Pointer to the adapter structure for the card
 * This function will read the configuration data from EEPROM/FLASH
 */
static inline int sxg_read_config(struct adapter_t *adapter)
{
	/* struct sxg_config data; */
	struct sw_cfg_data *data;
	dma_addr_t p_addr;
	unsigned long status;
	unsigned long i;

	data = pci_alloc_consistent(adapter->pcidev,
				    sizeof(struct sw_cfg_data), &p_addr);
	if (!data) {
		/*
		 * We can't get even this much memory.  Raise hell
		 * and get out of here.
		 */
		printk(KERN_ERR "%s : Could not allocate memory for reading "
		       "EEPROM\n", __func__);
		return -ENOMEM;
	}

	WRITE_REG(adapter->UcodeRegs[0].ConfigStat, SXG_CFG_TIMEOUT, TRUE);

	WRITE_REG64(adapter, adapter->UcodeRegs[0].Config, p_addr, 0);
	for (i = 0; i < 1000; i++) {
		READ_REG(adapter->UcodeRegs[0].ConfigStat, status);
		if (status != SXG_CFG_TIMEOUT)
			break;
		mdelay(1);	/* Do we really need this */
	}

	switch (status) {
	/* Config read from EEPROM succeeded */
	case SXG_CFG_LOAD_EEPROM:
	/* Config read from Flash succeeded */
	case SXG_CFG_LOAD_FLASH:
		/*
		 * Copy the MAC address to the adapter structure.
		 * TODO: We are not doing the remaining part: FRU, etc.
		 */
		memcpy(adapter->macaddr, data->MacAddr[0].MacAddr,
		       sizeof(struct sxg_config_mac));
		break;
	case SXG_CFG_TIMEOUT:
	case SXG_CFG_LOAD_INVALID:
	case SXG_CFG_LOAD_ERROR:
	default:	/* Fix default handler later */
		printk(KERN_WARNING "%s : We could not read the config "
		       "word. Status = %ld\n", __func__, status);
		break;
	}
	pci_free_consistent(adapter->pcidev, sizeof(struct sw_cfg_data), data,
			    p_addr);
	if (adapter->netdev) {
		memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
		memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6);
	}
	sxg_dbg_macaddrs(adapter);

	return status;
}

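/*
 * sxg_entry_probe - PCI probe routine
 *
 * Enable the PCI device, map both register BARs, allocate the net_device
 * and adapter resources, download the Sahara microcode, read the MAC
 * address from the configuration data, and register the net device.
 */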
static int sxg_entry_probe(struct pci_dev *pcidev,
			   const struct pci_device_id *pci_tbl_entry)
{
	static int did_version = 0;
	int err;
	struct net_device *netdev;
	struct adapter_t *adapter;
	void __iomem *memmapped_ioaddr;
	u32 status = 0;
	ulong mmio_start = 0;
	ulong mmio_len = 0;

	DBG_ERROR("sxg: %s 2.6 VERSION ENTER jiffies[%lx] cpu %d\n",
		  __func__, jiffies, smp_processor_id());

	/* Initialize trace buffer */
#ifdef ATKDBG
	SxgTraceBuffer = &LSxgTraceBuffer;
	SXG_TRACE_INIT(SxgTraceBuffer, TRACE_NOISY);
#endif

	sxg_global.dynamic_intagg = dynamic_intagg;

	err = pci_enable_device(pcidev);

	DBG_ERROR("Call pci_enable_device(%p) status[%x]\n", pcidev, err);
	if (err) {
		return err;
	}

	if (sxg_debug > 0 && did_version++ == 0) {
		printk(KERN_INFO "%s\n", sxg_banner);
		printk(KERN_INFO "%s\n", SXG_DRV_VERSION);
	}

	if (!(err = pci_set_dma_mask(pcidev, DMA_64BIT_MASK))) {
		DBG_ERROR("pci_set_dma_mask(DMA_64BIT_MASK) successful\n");
	} else {
		if ((err = pci_set_dma_mask(pcidev, DMA_32BIT_MASK))) {
			DBG_ERROR
			    ("No usable DMA configuration, aborting err[%x]\n",
			     err);
			return err;
		}
		DBG_ERROR("pci_set_dma_mask(DMA_32BIT_MASK) successful\n");
	}

	DBG_ERROR("Call pci_request_regions\n");

	err = pci_request_regions(pcidev, sxg_driver_name);
	if (err) {
		DBG_ERROR("pci_request_regions FAILED err[%x]\n", err);
		return err;
	}

	DBG_ERROR("call pci_set_master\n");
	pci_set_master(pcidev);

	DBG_ERROR("call alloc_etherdev\n");
	netdev = alloc_etherdev(sizeof(struct adapter_t));
	if (!netdev) {
		err = -ENOMEM;
		goto err_out_exit_sxg_probe;
	}
	DBG_ERROR("alloc_etherdev for slic netdev[%p]\n", netdev);

	SET_NETDEV_DEV(netdev, &pcidev->dev);

	pci_set_drvdata(pcidev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pcidev = pcidev;

	mmio_start = pci_resource_start(pcidev, 0);
	mmio_len = pci_resource_len(pcidev, 0);

	DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
		  mmio_start, mmio_len);

	memmapped_ioaddr = ioremap(mmio_start, mmio_len);
	DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
		  memmapped_ioaddr);
	if (!memmapped_ioaddr) {
		DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
			  __func__, mmio_len, mmio_start);
		goto err_out_free_mmio_region_0;
	}

	DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, start[%lx] "
		  "len[%lx], IRQ %d.\n", __func__, memmapped_ioaddr,
		  mmio_start, mmio_len, pcidev->irq);

	adapter->HwRegs = (void *)memmapped_ioaddr;
	adapter->base_addr = memmapped_ioaddr;

	mmio_start = pci_resource_start(pcidev, 2);
	mmio_len = pci_resource_len(pcidev, 2);

	DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
		  mmio_start, mmio_len);

	memmapped_ioaddr = ioremap(mmio_start, mmio_len);
	DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
		  memmapped_ioaddr);
	if (!memmapped_ioaddr) {
		DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
			  __func__, mmio_len, mmio_start);
		goto err_out_free_mmio_region_2;
	}

	DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, "
		  "start[%lx] len[%lx], IRQ %d.\n", __func__,
		  memmapped_ioaddr, mmio_start, mmio_len, pcidev->irq);

	adapter->UcodeRegs = (void *)memmapped_ioaddr;

	adapter->State = SXG_STATE_INITIALIZING;
	/*
	 * Maintain a list of all adapters anchored by
	 * the global SxgDriver structure.
	 */
	adapter->Next = SxgDriver.Adapters;
	SxgDriver.Adapters = adapter;
	adapter->AdapterID = ++SxgDriver.AdapterID;

	/* Initialize CRC table used to determine multicast hash */
	sxg_mcast_init_crc32();

	adapter->JumboEnabled = FALSE;
	adapter->RssEnabled = FALSE;
	if (adapter->JumboEnabled) {
		adapter->FrameSize = JUMBOMAXFRAME;
		adapter->ReceiveBufferSize = SXG_RCV_JUMBO_BUFFER_SIZE;
	} else {
		adapter->FrameSize = ETHERMAXFRAME;
		adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
	}

	/*
	 * status = SXG_READ_EEPROM(adapter);
	 * if (!status) {
	 *	goto sxg_init_bad;
	 * }
	 */

	DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __func__);
	sxg_config_pci(pcidev);
	DBG_ERROR("sxg: %s EXIT sxg_config_pci\n", __func__);

	DBG_ERROR("sxg: %s ENTER sxg_init_driver\n", __func__);
	sxg_init_driver();
	DBG_ERROR("sxg: %s EXIT sxg_init_driver\n", __func__);

	adapter->vendid = pci_tbl_entry->vendor;
	adapter->devid = pci_tbl_entry->device;
	adapter->subsysid = pci_tbl_entry->subdevice;
	adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F);
	adapter->functionnumber = (pcidev->devfn & 0x7);
	adapter->memorylength = pci_resource_len(pcidev, 0);
	adapter->irq = pcidev->irq;
	adapter->next_netdevice = head_netdevice;
	head_netdevice = netdev;
	adapter->port = 0;	/* adapter->functionnumber; */

	/* Allocate memory and other resources */
	DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __func__);
	status = sxg_allocate_resources(adapter);
	DBG_ERROR("sxg: %s EXIT sxg_allocate_resources status %x\n",
		  __func__, status);
	if (status != STATUS_SUCCESS) {
		goto err_out_unmap;
	}

	DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __func__);
	if (sxg_download_microcode(adapter, SXG_UCODE_SAHARA)) {
		DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
			  __func__);
		sxg_read_config(adapter);
		status = sxg_adapter_set_hwaddr(adapter);
	} else {
		adapter->state = ADAPT_FAIL;
		adapter->linkstate = LINK_DOWN;
		DBG_ERROR("sxg_download_microcode FAILED status[%x]\n", status);
	}

	netdev->base_addr = (unsigned long)adapter->base_addr;
	netdev->irq = adapter->irq;
	netdev->open = sxg_entry_open;
	netdev->stop = sxg_entry_halt;
	netdev->hard_start_xmit = sxg_send_packets;
	netdev->do_ioctl = sxg_ioctl;
	netdev->change_mtu = sxg_change_mtu;
#if XXXTODO
	netdev->set_mac_address = sxg_mac_set_address;
#endif
	netdev->get_stats = sxg_get_stats;
	netdev->set_multicast_list = sxg_mcast_set_list;
	SET_ETHTOOL_OPS(netdev, &sxg_nic_ethtool_ops);
	netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	err = sxg_set_interrupt_capability(adapter);
	if (err != STATUS_SUCCESS)
		DBG_ERROR("Cannot enable MSI-X capability\n");

	strcpy(netdev->name, "eth%d");
	/* strcpy(netdev->name, pci_name(pcidev)); */
	if ((err = register_netdev(netdev))) {
		DBG_ERROR("Cannot register net device, aborting. %s\n",
			  netdev->name);
		goto err_out_unmap;
	}

	netif_napi_add(netdev, &adapter->napi,
		       sxg_poll, SXG_NETDEV_WEIGHT);
	DBG_ERROR("sxg: %s addr 0x%lx, irq %d, MAC addr "
		  "%02X:%02X:%02X:%02X:%02X:%02X\n",
		  netdev->name, netdev->base_addr, pcidev->irq,
		  netdev->dev_addr[0], netdev->dev_addr[1],
		  netdev->dev_addr[2], netdev->dev_addr[3],
		  netdev->dev_addr[4], netdev->dev_addr[5]);

	/* sxg_init_bad: */
	ASSERT(status == FALSE);
	/* sxg_free_adapter(adapter); */

	DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __func__,
		  status, jiffies, smp_processor_id());
	return status;

 err_out_unmap:
	sxg_free_resources(adapter);

 err_out_free_mmio_region_2:

	mmio_start = pci_resource_start(pcidev, 2);
	mmio_len = pci_resource_len(pcidev, 2);
	release_mem_region(mmio_start, mmio_len);

 err_out_free_mmio_region_0:

	mmio_start = pci_resource_start(pcidev, 0);
	mmio_len = pci_resource_len(pcidev, 0);

	release_mem_region(mmio_start, mmio_len);

 err_out_exit_sxg_probe:

	DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __func__, jiffies,
		  smp_processor_id());

	pci_disable_device(pcidev);
	DBG_ERROR("sxg: %s deallocate device\n", __func__);
	kfree(netdev);
	printk("Exit %s, Sxg driver loading failed..\n", __func__);

	return -ENODEV;
}

/*
 * Line-based interrupt routines.
 *
 * sxg_disable_interrupt
 *
 * DisableInterrupt Handler
 *
 * Arguments:
 *
 *	adapter:	Our adapter structure
 *
 * Return Value:
 *	None.
 */
static void sxg_disable_interrupt(struct adapter_t *adapter)
{
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DisIntr",
		  adapter, adapter->InterruptsEnabled, 0, 0);
	/* For now, RSS is disabled with line based interrupts */
	ASSERT(adapter->RssEnabled == FALSE);
	/* Turn off interrupts by writing to the icr register. */
	WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_DISABLE), TRUE);

	adapter->InterruptsEnabled = 0;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDisIntr",
		  adapter, adapter->InterruptsEnabled, 0, 0);
}

/*
 * sxg_enable_interrupt
 *
 * EnableInterrupt Handler
 *
 * Arguments:
 *
 *	adapter:	Our adapter structure
 *
 * Return Value:
 *	None.
 */
static void sxg_enable_interrupt(struct adapter_t *adapter)
{
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "EnIntr",
		  adapter, adapter->InterruptsEnabled, 0, 0);
	/* For now, RSS is disabled with line based interrupts */
	ASSERT(adapter->RssEnabled == FALSE);
	/* Turn on interrupts by writing to the icr register. */
	WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE);

	adapter->InterruptsEnabled = 1;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XEnIntr",
		  adapter, 0, 0, 0);
}

1196/*
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001197 * sxg_isr - Process a line-based interrupt
1198 *
1199 * Arguments:
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301200 *	irq - Interrupt vector assigned to this adapter
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001201 *	dev_id - Our net_device, which refs our adapter structure
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001203 *
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301204 * Return Value: IRQ_HANDLED if the interrupt was ours, else IRQ_NONE
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001205 */
1206static irqreturn_t sxg_isr(int irq, void *dev_id)
1207{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301208 struct net_device *dev = (struct net_device *) dev_id;
J.R. Mauro73b07062008-10-28 18:42:02 -04001209 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001210
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05301211 if(adapter->state != ADAPT_UP)
1212 return IRQ_NONE;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001213 adapter->Stats.NumInts++;
1214 if (adapter->Isr[0] == 0) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301215 /*
1216 * The SLIC driver used to experience a number of spurious
1217 * interrupts due to the delay associated with the masking of
1218 * the interrupt (we'd bounce back in here). If we see that
1219			 * again with Sahara, add a READ_REG of the Icr register after
1220 * the WRITE_REG below.
1221 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001222 adapter->Stats.FalseInts++;
1223 return IRQ_NONE;
1224 }
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301225 /*
1226 * Move the Isr contents and clear the value in
1227 * shared memory, and mask interrupts
1228 */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301229 /* ASSERT(adapter->IsrDpcsPending == 0); */
J.R. Maurob243c4a2008-10-20 19:28:58 -04001230#if XXXTODO /* RSS Stuff */
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301231 /*
1232 * If RSS is enabled and the ISR specifies SXG_ISR_EVENT, then
1233 * schedule DPC's based on event queues.
1234 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001235 if (adapter->RssEnabled && (adapter->IsrCopy[0] & SXG_ISR_EVENT)) {
1236 for (i = 0;
1237 i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount;
1238 i++) {
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301239 struct sxg_event_ring *EventRing =
1240 &adapter->EventRings[i];
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301241 struct sxg_event *Event =
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001242 &EventRing->Ring[adapter->NextEvent[i]];
J.R. Mauro5c7514e2008-10-05 20:38:52 -04001243 unsigned char Cpu =
1244 adapter->RssSystemInfo->RssIdToCpu[i];
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001245 if (Event->Status & EVENT_STATUS_VALID) {
1246 adapter->IsrDpcsPending++;
1247 CpuMask |= (1 << Cpu);
1248 }
1249 }
1250 }
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301251 /*
1252 * Now, either schedule the CPUs specified by the CpuMask,
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301253 * or queue default
1254 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001255 if (CpuMask) {
1256 *QueueDefault = FALSE;
1257 } else {
1258 adapter->IsrDpcsPending = 1;
1259 *QueueDefault = TRUE;
1260 }
1261 *TargetCpus = CpuMask;
1262#endif
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301263 sxg_interrupt(adapter);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001264
1265 return IRQ_HANDLED;
1266}
1267
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301268static void sxg_interrupt(struct adapter_t *adapter)
1269{
1270 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE);
1271
Randy Dunlapc1f46a002009-02-11 13:22:56 -08001272 if (napi_schedule_prep(&adapter->napi)) {
1273 __napi_schedule(&adapter->napi);
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301274 }
1275}
1276
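/*
 * sxg_handle_interrupt - Bottom-half interrupt processing, run from the
 * NAPI poll routine.  Snapshot and clear the shared ISR, drain the event
 * queue, then process the saved ISR bits via sxg_process_isr().
 */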
1277static void sxg_handle_interrupt(struct adapter_t *adapter, int *work_done,
1278 int budget)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001279{
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301280 /* unsigned char RssId = 0; */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001281 u32 NewIsr;
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301282 int sxg_napi_continue = 1;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001283 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "HndlIntr",
1284 adapter, adapter->IsrCopy[0], 0, 0);
J.R. Maurob243c4a2008-10-20 19:28:58 -04001285 /* For now, RSS is disabled with line based interrupts */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001286 ASSERT(adapter->RssEnabled == FALSE);
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301287
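	/* Snapshot the shared ISR and clear it before processing events */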
1288 adapter->IsrCopy[0] = adapter->Isr[0];
1289 adapter->Isr[0] = 0;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001290
J.R. Maurob243c4a2008-10-20 19:28:58 -04001291 /* Always process the event queue. */
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301292 while (sxg_napi_continue)
1293 {
1294 sxg_process_event_queue(adapter,
1295 (adapter->RssEnabled ? /*RssId */ 0 : 0),
1296 &sxg_napi_continue, work_done, budget);
1297 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001298
J.R. Maurob243c4a2008-10-20 19:28:58 -04001299#if XXXTODO /* RSS stuff */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001300 if (--adapter->IsrDpcsPending) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04001301 /* We're done. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001302 ASSERT(adapter->RssEnabled);
1303 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DPCsPend",
1304 adapter, 0, 0, 0);
1305 return;
1306 }
1307#endif
J.R. Maurob243c4a2008-10-20 19:28:58 -04001308 /* Last (or only) DPC processes the ISR and clears the interrupt. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001309 NewIsr = sxg_process_isr(adapter, 0);
J.R. Maurob243c4a2008-10-20 19:28:58 -04001310 /* Reenable interrupts */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001311 adapter->IsrCopy[0] = 0;
1312 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ClearIsr",
1313 adapter, NewIsr, 0, 0);
1314
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001315 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XHndlInt",
1316 adapter, 0, 0, 0);
1317}
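/*
 * sxg_poll - NAPI poll routine
 *
 * Process receive events until the event ring is drained or the NAPI
 * budget is exhausted.  When less than the full budget is consumed the
 * poll is complete: napi_complete() is called and the shared ISR
 * register is cleared.
 */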
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301318static int sxg_poll(struct napi_struct *napi, int budget)
1319{
1320 struct adapter_t *adapter = container_of(napi, struct adapter_t, napi);
1321 int work_done = 0;
1322
1323 sxg_handle_interrupt(adapter, &work_done, budget);
1324
1325 if (work_done < budget) {
Randy Dunlapc1f46a002009-02-11 13:22:56 -08001326 napi_complete(napi);
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301327 WRITE_REG(adapter->UcodeRegs[0].Isr, 0, TRUE);
1328 }
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301329 return work_done;
1330}
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001331
1332/*
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001333 * sxg_process_isr - Process an interrupt. Called from the line-based and
1334 * message based interrupt DPC routines
1335 *
1336 * Arguments:
1337 * adapter - Our adapter structure
1338 * Queue - The ISR that needs processing
1339 *
1340 * Return Value:
1341 * None
1342 */
J.R. Mauro73b07062008-10-28 18:42:02 -04001343static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001344{
1345 u32 Isr = adapter->IsrCopy[MessageId];
1346 u32 NewIsr = 0;
1347
1348 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ProcIsr",
1349 adapter, Isr, 0, 0);
1350
J.R. Maurob243c4a2008-10-20 19:28:58 -04001351 /* Error */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001352 if (Isr & SXG_ISR_ERR) {
1353 if (Isr & SXG_ISR_PDQF) {
1354 adapter->Stats.PdqFull++;
Harvey Harrisone88bd232008-10-17 14:46:10 -07001355 DBG_ERROR("%s: SXG_ISR_ERR PDQF!!\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001356 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001357 /* No host buffer */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001358 if (Isr & SXG_ISR_RMISS) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301359 /*
1360 * There is a bunch of code in the SLIC driver which
1361 * attempts to process more receive events per DPC
1362			 * if we start to fall behind. We'll probably
1363 * need to do something similar here, but hold
1364 * off for now. I don't want to make the code more
1365 * complicated than strictly needed.
1366 */
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05301367 adapter->stats.rx_missed_errors++;
Mithlesh Thukral54aed112009-01-19 20:27:17 +05301368			if (adapter->stats.rx_missed_errors < 5) {
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001369 DBG_ERROR("%s: SXG_ISR_ERR RMISS!!\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07001370 __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001371 }
1372 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001373 /* Card crash */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001374 if (Isr & SXG_ISR_DEAD) {
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301375 /*
1376 * Set aside the crash info and set the adapter state
1377 * to RESET
1378 */
1379 adapter->CrashCpu = (unsigned char)
1380 ((Isr & SXG_ISR_CPU) >> SXG_ISR_CPU_SHIFT);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001381 adapter->CrashLocation = (ushort) (Isr & SXG_ISR_CRASH);
1382 adapter->Dead = TRUE;
Harvey Harrisone88bd232008-10-17 14:46:10 -07001383 DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __func__,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001384 adapter->CrashLocation, adapter->CrashCpu);
1385 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001386 /* Event ring full */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001387 if (Isr & SXG_ISR_ERFULL) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301388 /*
1389 * Same issue as RMISS, really. This means the
1390 * host is falling behind the card. Need to increase
1391 * event ring size, process more events per interrupt,
1392 * and/or reduce/remove interrupt aggregation.
1393 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001394 adapter->Stats.EventRingFull++;
1395 DBG_ERROR("%s: SXG_ISR_ERR EVENT RING FULL!!\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07001396 __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001397 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001398 /* Transmit drop - no DRAM buffers or XMT error */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001399 if (Isr & SXG_ISR_XDROP) {
Harvey Harrisone88bd232008-10-17 14:46:10 -07001400 DBG_ERROR("%s: SXG_ISR_ERR XDROP!!\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001401 }
1402 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001403 /* Slowpath send completions */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001404 if (Isr & SXG_ISR_SPSEND) {
Mithlesh Thukralc5e5cf52009-02-06 19:31:40 +05301405 sxg_complete_slow_send(adapter);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001406 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001407 /* Dump */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001408 if (Isr & SXG_ISR_UPC) {
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301409 /* Maybe change when debug is added.. */
Mithlesh Thukral54aed112009-01-19 20:27:17 +05301410		/* ASSERT(adapter->DumpCmdRunning); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001411 adapter->DumpCmdRunning = FALSE;
1412 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001413 /* Link event */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001414 if (Isr & SXG_ISR_LINK) {
1415 sxg_link_event(adapter);
1416 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001417 /* Debug - breakpoint hit */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001418 if (Isr & SXG_ISR_BREAK) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301419 /*
1420 * At the moment AGDB isn't written to support interactive
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301421 * debug sessions. When it is, this interrupt will be used to
1422 * signal AGDB that it has hit a breakpoint. For now, ASSERT.
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301423 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001424 ASSERT(0);
1425 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001426 /* Heartbeat response */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001427 if (Isr & SXG_ISR_PING) {
1428 adapter->PingOutstanding = FALSE;
1429 }
1430 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XProcIsr",
1431 adapter, Isr, NewIsr, 0);
1432
1433 return (NewIsr);
1434}
1435
1436/*
Mithlesh Thukral9914f052009-02-18 18:51:29 +05301437 * sxg_rcv_checksum - Set the checksum status for a received packet
1438 *
1439 * Arguments:
1440 *	@skb - Packet which is received
1441 *	@Event - Event read from hardware
1442 */
1443
1444void sxg_rcv_checksum(struct sk_buff *skb, struct sxg_event *Event)
1445{
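	/*
	 * Start from CHECKSUM_NONE and upgrade to CHECKSUM_UNNECESSARY when
	 * the event reports a good TCP/IP (or IP-only) checksum, so the
	 * stack can skip software verification.
	 */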
1446 skb->ip_summed = CHECKSUM_NONE;
1447 if(Event->Status & EVENT_STATUS_TCPIP) {
1448 if(!(Event->Status & EVENT_STATUS_TCPBAD)) {
1449 skb->ip_summed = CHECKSUM_UNNECESSARY;
1450 }
1451 if(!(Event->Status & EVENT_STATUS_IPBAD)) {
1452 skb->ip_summed = CHECKSUM_UNNECESSARY;
1453 }
1454 } else if(Event->Status & EVENT_STATUS_IPONLY) {
1455 if(!(Event->Status & EVENT_STATUS_IPBAD)) {
1456 skb->ip_summed = CHECKSUM_UNNECESSARY;
1457 }
1458 }
1459}
1460
1461/*
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001462 * sxg_process_event_queue - Process our event queue
1463 *
1464 * Arguments:
1465 * - adapter - Adapter structure
1466 *	- RssId - The event queue requiring processing
 *	- sxg_napi_continue - Cleared when the event ring has been drained
 *	- work_done, budget - NAPI work accounting
1467 *
1468 * Return Value:
1469 *	0, or SXG_ISR_EVENT if the batch limit was reached and the
 *	queue should be serviced again.
1470 */
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301471static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId,
1472 int *sxg_napi_continue, int *work_done, int budget)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001473{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301474 struct sxg_event_ring *EventRing = &adapter->EventRings[RssId];
1475 struct sxg_event *Event = &EventRing->Ring[adapter->NextEvent[RssId]];
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001476 u32 EventsProcessed = 0, Batches = 0;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001477 struct sk_buff *skb;
1478#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1479 struct sk_buff *prev_skb = NULL;
1480 struct sk_buff *IndicationList[SXG_RCV_ARRAYSIZE];
1481 u32 Index;
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301482 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001483#endif
1484 u32 ReturnStatus = 0;
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05301485 int sxg_rcv_data_buffers = SXG_RCV_DATA_BUFFERS;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001486
1487 ASSERT((adapter->State == SXG_STATE_RUNNING) ||
1488 (adapter->State == SXG_STATE_PAUSING) ||
1489 (adapter->State == SXG_STATE_PAUSED) ||
1490 (adapter->State == SXG_STATE_HALTING));
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301491 /*
1492 * We may still have unprocessed events on the queue if
1493 * the card crashed. Don't process them.
1494 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001495 if (adapter->Dead) {
1496 return (0);
1497 }
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301498 /*
1499 * In theory there should only be a single processor that
1500	 * accesses this queue, and only at interrupt-DPC time. So
1501 * we shouldn't need a lock for any of this.
1502 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001503 while (Event->Status & EVENT_STATUS_VALID) {
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301504 (*sxg_napi_continue) = 1;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001505 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "Event",
1506 Event, Event->Code, Event->Status,
1507 adapter->NextEvent);
1508 switch (Event->Code) {
1509 case EVENT_CODE_BUFFERS:
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301510 /* struct sxg_ring_info Head & Tail == unsigned char */
1511 ASSERT(!(Event->CommandIndex & 0xFF00));
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001512 sxg_complete_descriptor_blocks(adapter,
1513 Event->CommandIndex);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001514 break;
1515 case EVENT_CODE_SLOWRCV:
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301516 (*work_done)++;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001517 --adapter->RcvBuffersOnCard;
1518 if ((skb = sxg_slow_receive(adapter, Event))) {
1519 u32 rx_bytes;
1520#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
J.R. Maurob243c4a2008-10-20 19:28:58 -04001521 /* Add it to our indication list */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001522 SXG_ADD_RCV_PACKET(adapter, skb, prev_skb,
1523 IndicationList, num_skbs);
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301524 /*
1525 * Linux, we just pass up each skb to the
1526 * protocol above at this point, there is no
1527 * capability of an indication list.
1528 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001529#else
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301530 /* CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); */
1531 /* (rcvbuf->length & IRHDDR_FLEN_MSK); */
1532 rx_bytes = Event->Length;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001533 adapter->stats.rx_packets++;
1534 adapter->stats.rx_bytes += rx_bytes;
Mithlesh Thukral9914f052009-02-18 18:51:29 +05301535 sxg_rcv_checksum(skb, Event);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001536 skb->dev = adapter->netdev;
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301537 netif_receive_skb(skb);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001538#endif
1539 }
1540 break;
1541 default:
1542 DBG_ERROR("%s: ERROR Invalid EventCode %d\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07001543 __func__, Event->Code);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301544 /* ASSERT(0); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001545 }
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301546 /*
1547 * See if we need to restock card receive buffers.
1548 * There are two things to note here:
1549 * First - This test is not SMP safe. The
1550 * adapter->BuffersOnCard field is protected via atomic
1551 * interlocked calls, but we do not protect it with respect
1552 * to these tests. The only way to do that is with a lock,
1553 * and I don't want to grab a lock every time we adjust the
1554 * BuffersOnCard count. Instead, we allow the buffer
1555 * replenishment to be off once in a while. The worst that
1556		 * can happen is the card is given one more or less descriptor
1557		 * block than the arbitrary value we've chosen. No big deal.
1558		 * In short, DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard
1559 * is adjusted.
1560 * Second - We expect this test to rarely
1561 * evaluate to true. We attempt to refill descriptor blocks
1562 * as they are returned to us (sxg_complete_descriptor_blocks)
1563		 * so the only time this should evaluate to true is when
1564 * sxg_complete_descriptor_blocks failed to allocate
1565 * receive buffers.
1566 */
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05301567 if (adapter->JumboEnabled)
1568 sxg_rcv_data_buffers = SXG_JUMBO_RCV_DATA_BUFFERS;
1569
1570 if (adapter->RcvBuffersOnCard < sxg_rcv_data_buffers) {
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001571 sxg_stock_rcv_buffers(adapter);
1572 }
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301573 /*
1574 * It's more efficient to just set this to zero.
1575 * But clearing the top bit saves potential debug info...
1576 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001577 Event->Status &= ~EVENT_STATUS_VALID;
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301578 /* Advance to the next event */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001579 SXG_ADVANCE_INDEX(adapter->NextEvent[RssId], EVENT_RING_SIZE);
1580 Event = &EventRing->Ring[adapter->NextEvent[RssId]];
1581 EventsProcessed++;
1582 if (EventsProcessed == EVENT_RING_BATCH) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04001583 /* Release a batch of events back to the card */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001584 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1585 EVENT_RING_BATCH, FALSE);
1586 EventsProcessed = 0;
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301587 /*
1588 * If we've processed our batch limit, break out of the
1589 * loop and return SXG_ISR_EVENT to arrange for us to
1590 * be called again
1591 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001592 if (Batches++ == EVENT_BATCH_LIMIT) {
1593 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1594 TRACE_NOISY, "EvtLimit", Batches,
1595 adapter->NextEvent, 0, 0);
1596 ReturnStatus = SXG_ISR_EVENT;
1597 break;
1598 }
1599 }
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301600 if (*work_done >= budget) {
1601 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1602 EventsProcessed, FALSE);
1603 EventsProcessed = 0;
1604 (*sxg_napi_continue) = 0;
1605 break;
1606 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001607 }
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05301608 if (!(Event->Status & EVENT_STATUS_VALID))
1609 (*sxg_napi_continue) = 0;
1610
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001611#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
J.R. Maurob243c4a2008-10-20 19:28:58 -04001612 /* Indicate any received dumb-nic frames */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001613 SXG_INDICATE_PACKETS(adapter, IndicationList, num_skbs);
1614#endif
J.R. Maurob243c4a2008-10-20 19:28:58 -04001615 /* Release events back to the card. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001616 if (EventsProcessed) {
1617 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1618 EventsProcessed, FALSE);
1619 }
1620 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XPrcEvnt",
1621 Batches, EventsProcessed, adapter->NextEvent, num_skbs);
1622
1623 return (ReturnStatus);
1624}
1625
1626/*
1627 * sxg_complete_slow_send - Complete slowpath or dumb-nic sends
1628 *
1629 * Arguments -
1630 * adapter - A pointer to our adapter structure
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001631 * Return
1632 * None
1633 */
Mithlesh Thukralc5e5cf52009-02-06 19:31:40 +05301634static void sxg_complete_slow_send(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001635{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301636 struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
1637 struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
J.R. Mauro5c7514e2008-10-05 20:38:52 -04001638 u32 *ContextType;
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301639 struct sxg_cmd *XmtCmd;
Mithlesh Thukral54aed112009-01-19 20:27:17 +05301640 unsigned long flags = 0;
1641 unsigned long sgl_flags = 0;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301642 unsigned int processed_count = 0;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001643
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301644 /*
1645 * NOTE - This lock is dropped and regrabbed in this loop.
1646	 * This means two different processors can both be running
1647 * through this loop. Be *very* careful.
1648 */
Mithlesh Thukralc5e5cf52009-02-06 19:31:40 +05301649 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301650
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001651 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds",
1652 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1653
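	/*
	 * SXG_COMPLETE_SLOW_SEND_LIMIT bounds the number of completions
	 * handled per call so we do not spin here indefinitely under a
	 * heavy transmit load.
	 */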
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301654 while ((XmtRingInfo->Tail != *adapter->XmtRingZeroIndex)
1655 && processed_count++ < SXG_COMPLETE_SLOW_SEND_LIMIT) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301656 /*
1657 * Locate the current Cmd (ring descriptor entry), and
1658 * associated SGL, and advance the tail
1659 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001660 SXG_RETURN_CMD(XmtRing, XmtRingInfo, XmtCmd, ContextType);
1661 ASSERT(ContextType);
1662 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1663 XmtRingInfo->Head, XmtRingInfo->Tail, XmtCmd, 0);
J.R. Maurob243c4a2008-10-20 19:28:58 -04001664 /* Clear the SGL field. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001665 XmtCmd->Sgl = 0;
1666
1667 switch (*ContextType) {
1668 case SXG_SGL_DUMB:
1669 {
1670 struct sk_buff *skb;
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301671 struct sxg_scatter_gather *SxgSgl =
1672 (struct sxg_scatter_gather *)ContextType;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301673 dma64_addr_t FirstSgeAddress;
1674 u32 FirstSgeLength;
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05301675
J.R. Maurob243c4a2008-10-20 19:28:58 -04001676 /* Dumb-nic send. Command context is the dumb-nic SGL */
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05301678				skb = SxgSgl->DumbPacket;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301679 FirstSgeAddress = XmtCmd->Buffer.FirstSgeAddress;
1680 FirstSgeLength = XmtCmd->Buffer.FirstSgeLength;
J.R. Maurob243c4a2008-10-20 19:28:58 -04001681 /* Complete the send */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001682 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1683 TRACE_IMPORTANT, "DmSndCmp", skb, 0,
1684 0, 0);
1685 ASSERT(adapter->Stats.XmtQLen);
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301686 /*
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301687 * Now drop the lock and complete the send
1688 * back to Microsoft. We need to drop the lock
1689 * because Microsoft can come back with a
1690 * chimney send, which results in a double trip
1691				 * in SxgTcpOutput
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301692 */
Mithlesh Thukralc5e5cf52009-02-06 19:31:40 +05301693 spin_unlock_irqrestore(
1694 &adapter->XmtZeroLock, flags);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301695
1696 SxgSgl->DumbPacket = NULL;
1697 SXG_COMPLETE_DUMB_SEND(adapter, skb,
1698 FirstSgeAddress,
1699 FirstSgeLength);
Mithlesh Thukralc5e5cf52009-02-06 19:31:40 +05301700 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL);
J.R. Maurob243c4a2008-10-20 19:28:58 -04001701 /* and reacquire.. */
Mithlesh Thukralc5e5cf52009-02-06 19:31:40 +05301702 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001703 }
1704 break;
1705 default:
1706 ASSERT(0);
1707 }
1708 }
Mithlesh Thukralc5e5cf52009-02-06 19:31:40 +05301709 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001710 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1711 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1712}
1713
1714/*
1715 * sxg_slow_receive
1716 *
1717 * Arguments -
1718 * adapter - A pointer to our adapter structure
1719 * Event - Receive event
1720 *
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301721 * Return - skb
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001722 */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301723static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
1724 struct sxg_event *Event)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001725{
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05301726 u32 BufferSize = adapter->ReceiveBufferSize;
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301727 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001728 struct sk_buff *Packet;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301729	static int read_counter;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001730
Mithlesh Thukral942798b2009-01-05 21:14:34 +05301731 RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *) Event->HostHandle;
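	/* Refresh the adapter statistics roughly every 256 receives */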
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301732	if (read_counter++ & 0x100) {
1734 sxg_collect_statistics(adapter);
1735 read_counter = 0;
1736 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001737 ASSERT(RcvDataBufferHdr);
1738 ASSERT(RcvDataBufferHdr->State == SXG_BUFFER_ONCARD);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001739 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event,
1740 RcvDataBufferHdr, RcvDataBufferHdr->State,
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05301741 /*RcvDataBufferHdr->VirtualAddress*/ 0);
J.R. Maurob243c4a2008-10-20 19:28:58 -04001742 /* Drop rcv frames in non-running state */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001743 switch (adapter->State) {
1744 case SXG_STATE_RUNNING:
1745 break;
1746 case SXG_STATE_PAUSING:
1747 case SXG_STATE_PAUSED:
1748 case SXG_STATE_HALTING:
1749 goto drop;
1750 default:
1751 ASSERT(0);
1752 goto drop;
1753 }
1754
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301755 /*
1756 * memcpy(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1757 * RcvDataBufferHdr->VirtualAddress, Event->Length);
1758 */
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05301759
J.R. Maurob243c4a2008-10-20 19:28:58 -04001760 /* Change buffer state to UPSTREAM */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001761 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
1762 if (Event->Status & EVENT_STATUS_RCVERR) {
1763 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvError",
1764 Event, Event->Status, Event->HostHandle, 0);
J.R. Maurob243c4a2008-10-20 19:28:58 -04001765 /* XXXTODO - Remove this print later */
J.R. Mauro5c7514e2008-10-05 20:38:52 -04001766 DBG_ERROR("SXG: Receive error %x\n", *(u32 *)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001767 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr));
J.R. Mauro5c7514e2008-10-05 20:38:52 -04001768 sxg_process_rcv_error(adapter, *(u32 *)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001769 SXG_RECEIVE_DATA_LOCATION
1770 (RcvDataBufferHdr));
1771 goto drop;
1772 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04001773#if XXXTODO /* VLAN stuff */
1774 /* If there's a VLAN tag, extract it and validate it */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301775 if (((struct ether_header *)
1776 (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))->EtherType
1777 == ETHERTYPE_VLAN) {
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001778 if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) !=
1779 STATUS_SUCCESS) {
1780 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY,
1781 "BadVlan", Event,
1782 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1783 Event->Length, 0);
1784 goto drop;
1785 }
1786 }
1787#endif
J.R. Maurob243c4a2008-10-20 19:28:58 -04001788 /* Dumb-nic frame. See if it passes our mac filter and update stats */
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301789
Mithlesh Thukralb040b072009-01-28 07:08:11 +05301790 if (!sxg_mac_filter(adapter,
1791 (struct ether_header *)(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)),
1792 Event->Length)) {
1793 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvFiltr",
1794 Event, SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1795 Event->Length, 0);
1796 goto drop;
1797 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001798
1799 Packet = RcvDataBufferHdr->SxgDumbRcvPacket;
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05301800 SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event);
1801 Packet->protocol = eth_type_trans(Packet, adapter->netdev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001802
1803 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv",
1804 RcvDataBufferHdr, Packet, Event->Length, 0);
J.R. Maurob243c4a2008-10-20 19:28:58 -04001805 /* Lastly adjust the receive packet length. */
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05301806 RcvDataBufferHdr->SxgDumbRcvPacket = NULL;
Mithlesh Thukral54aed112009-01-19 20:27:17 +05301807	RcvDataBufferHdr->PhysicalAddress = 0;
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05301808 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize);
1809 if (RcvDataBufferHdr->skb)
1810 {
1811 spin_lock(&adapter->RcvQLock);
1812 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05301813		/* adapter->RcvBuffersOnCard++; */
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05301814 spin_unlock(&adapter->RcvQLock);
1815 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001816 return (Packet);
1817
1818 drop:
1819 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DropRcv",
1820 RcvDataBufferHdr, Event->Length, 0, 0);
Mithlesh Thukral54aed112009-01-19 20:27:17 +05301821 adapter->stats.rx_dropped++;
1822	/* adapter->Stats.RcvDiscards++; */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001823 spin_lock(&adapter->RcvQLock);
1824 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
1825 spin_unlock(&adapter->RcvQLock);
1826 return (NULL);
1827}
1828
1829/*
1830 * sxg_process_rcv_error - process receive error and update
1831 * stats
1832 *
1833 * Arguments:
1834 * adapter - Adapter structure
1835 * ErrorStatus - 4-byte receive error status
1836 *
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301837 * Return Value : None
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001838 */
J.R. Mauro73b07062008-10-28 18:42:02 -04001839static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001840{
1841 u32 Error;
1842
Mithlesh Thukral54aed112009-01-19 20:27:17 +05301843 adapter->stats.rx_errors++;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001844
1845 if (ErrorStatus & SXG_RCV_STATUS_TRANSPORT_ERROR) {
1846 Error = ErrorStatus & SXG_RCV_STATUS_TRANSPORT_MASK;
1847 switch (Error) {
1848 case SXG_RCV_STATUS_TRANSPORT_CSUM:
1849 adapter->Stats.TransportCsum++;
1850 break;
1851 case SXG_RCV_STATUS_TRANSPORT_UFLOW:
1852 adapter->Stats.TransportUflow++;
1853 break;
1854 case SXG_RCV_STATUS_TRANSPORT_HDRLEN:
1855 adapter->Stats.TransportHdrLen++;
1856 break;
1857 }
1858 }
1859 if (ErrorStatus & SXG_RCV_STATUS_NETWORK_ERROR) {
1860 Error = ErrorStatus & SXG_RCV_STATUS_NETWORK_MASK;
1861 switch (Error) {
1862 case SXG_RCV_STATUS_NETWORK_CSUM:
1863 adapter->Stats.NetworkCsum++;
1864 break;
1865 case SXG_RCV_STATUS_NETWORK_UFLOW:
1866 adapter->Stats.NetworkUflow++;
1867 break;
1868 case SXG_RCV_STATUS_NETWORK_HDRLEN:
1869 adapter->Stats.NetworkHdrLen++;
1870 break;
1871 }
1872 }
1873 if (ErrorStatus & SXG_RCV_STATUS_PARITY) {
1874 adapter->Stats.Parity++;
1875 }
1876 if (ErrorStatus & SXG_RCV_STATUS_LINK_ERROR) {
1877 Error = ErrorStatus & SXG_RCV_STATUS_LINK_MASK;
1878 switch (Error) {
1879 case SXG_RCV_STATUS_LINK_PARITY:
1880 adapter->Stats.LinkParity++;
1881 break;
1882 case SXG_RCV_STATUS_LINK_EARLY:
1883 adapter->Stats.LinkEarly++;
1884 break;
1885 case SXG_RCV_STATUS_LINK_BUFOFLOW:
1886 adapter->Stats.LinkBufOflow++;
1887 break;
1888 case SXG_RCV_STATUS_LINK_CODE:
1889 adapter->Stats.LinkCode++;
1890 break;
1891 case SXG_RCV_STATUS_LINK_DRIBBLE:
1892 adapter->Stats.LinkDribble++;
1893 break;
1894 case SXG_RCV_STATUS_LINK_CRC:
1895 adapter->Stats.LinkCrc++;
1896 break;
1897 case SXG_RCV_STATUS_LINK_OFLOW:
1898 adapter->Stats.LinkOflow++;
1899 break;
1900 case SXG_RCV_STATUS_LINK_UFLOW:
1901 adapter->Stats.LinkUflow++;
1902 break;
1903 }
1904 }
1905}
1906
1907/*
1908 * sxg_mac_filter
1909 *
1910 * Arguments:
1911 * adapter - Adapter structure
1912 * pether - Ethernet header
1913 * length - Frame length
1914 *
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301915 * Return Value : TRUE if the frame is to be allowed
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001916 */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05301917static bool sxg_mac_filter(struct adapter_t *adapter,
1918 struct ether_header *EtherHdr, ushort length)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001919{
1920 bool EqualAddr;
Mithlesh Thukralb040b072009-01-28 07:08:11 +05301921 struct net_device *dev = adapter->netdev;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001922
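	/*
	 * Filter order: broadcast and multicast frames are checked against
	 * MAC_BCAST/MAC_ALLMCAST/MAC_MCAST (including the device multicast
	 * list); everything else is accepted under MAC_DIRECTED, with
	 * MAC_PROMISC as the final catch-all.
	 */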
1923 if (SXG_MULTICAST_PACKET(EtherHdr)) {
1924 if (SXG_BROADCAST_PACKET(EtherHdr)) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04001925 /* broadcast */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001926 if (adapter->MacFilter & MAC_BCAST) {
1927 adapter->Stats.DumbRcvBcastPkts++;
1928 adapter->Stats.DumbRcvBcastBytes += length;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001929 return (TRUE);
1930 }
1931 } else {
J.R. Maurob243c4a2008-10-20 19:28:58 -04001932 /* multicast */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001933 if (adapter->MacFilter & MAC_ALLMCAST) {
1934 adapter->Stats.DumbRcvMcastPkts++;
1935 adapter->Stats.DumbRcvMcastBytes += length;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001936 return (TRUE);
1937 }
1938 if (adapter->MacFilter & MAC_MCAST) {
Mithlesh Thukralb040b072009-01-28 07:08:11 +05301939 struct dev_mc_list *mclist = dev->mc_list;
1940 while (mclist) {
1941 ETHER_EQ_ADDR(mclist->da_addr,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001942 EtherHdr->ether_dhost,
1943 EqualAddr);
1944 if (EqualAddr) {
1945 adapter->Stats.
1946 DumbRcvMcastPkts++;
1947 adapter->Stats.
1948 DumbRcvMcastBytes += length;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001949 return (TRUE);
1950 }
Mithlesh Thukralb040b072009-01-28 07:08:11 +05301951 mclist = mclist->next;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001952 }
1953 }
1954 }
1955 } else if (adapter->MacFilter & MAC_DIRECTED) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05301956 /*
1957 * Not broadcast or multicast. Must be directed at us or
1958 * the card is in promiscuous mode. Either way, consider it
1959 * ours if MAC_DIRECTED is set
1960 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001961 adapter->Stats.DumbRcvUcastPkts++;
1962 adapter->Stats.DumbRcvUcastBytes += length;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001963 return (TRUE);
1964 }
1965 if (adapter->MacFilter & MAC_PROMISC) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04001966 /* Whatever it is, keep it. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001967 return (TRUE);
1968 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001969 return (FALSE);
1970}
Mithlesh Thukralb040b072009-01-28 07:08:11 +05301971
J.R. Mauro73b07062008-10-28 18:42:02 -04001972static int sxg_register_interrupt(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001973{
1974 if (!adapter->intrregistered) {
1975 int retval;
1976
1977 DBG_ERROR
1978 ("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x] %x\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07001979 __func__, adapter, adapter->netdev->irq, NR_IRQS);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001980
J.R. Mauro5c7514e2008-10-05 20:38:52 -04001981 spin_unlock_irqrestore(&sxg_global.driver_lock,
1982 sxg_global.flags);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001983
1984 retval = request_irq(adapter->netdev->irq,
1985 &sxg_isr,
1986 IRQF_SHARED,
1987 adapter->netdev->name, adapter->netdev);
1988
1989 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
1990
1991 if (retval) {
1992 DBG_ERROR("sxg: request_irq (%s) FAILED [%x]\n",
1993 adapter->netdev->name, retval);
1994 return (retval);
1995 }
1996 adapter->intrregistered = 1;
1997 adapter->IntRegistered = TRUE;
J.R. Maurob243c4a2008-10-20 19:28:58 -04001998 /* Disable RSS with line-based interrupts */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07001999 adapter->RssEnabled = FALSE;
2000 DBG_ERROR("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x]\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07002001 __func__, adapter, adapter->netdev->irq);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002002 }
2003 return (STATUS_SUCCESS);
2004}
2005
J.R. Mauro73b07062008-10-28 18:42:02 -04002006static void sxg_deregister_interrupt(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002007{
Harvey Harrisone88bd232008-10-17 14:46:10 -07002008 DBG_ERROR("sxg: %s ENTER adapter[%p]\n", __func__, adapter);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002009#if XXXTODO
2010 slic_init_cleanup(adapter);
2011#endif
2012 memset(&adapter->stats, 0, sizeof(struct net_device_stats));
2013 adapter->error_interrupts = 0;
2014 adapter->rcv_interrupts = 0;
2015 adapter->xmit_interrupts = 0;
2016 adapter->linkevent_interrupts = 0;
2017 adapter->upr_interrupts = 0;
2018 adapter->num_isrs = 0;
2019 adapter->xmit_completes = 0;
2020 adapter->rcv_broadcasts = 0;
2021 adapter->rcv_multicasts = 0;
2022 adapter->rcv_unicasts = 0;
Harvey Harrisone88bd232008-10-17 14:46:10 -07002023 DBG_ERROR("sxg: %s EXIT\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002024}
2025
2026/*
2027 * sxg_if_init
2028 *
2029 * Perform initialization of our sxg interface.
2030 *
2031 */
J.R. Mauro73b07062008-10-28 18:42:02 -04002032static int sxg_if_init(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002033{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302034 struct net_device *dev = adapter->netdev;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002035 int status = 0;
2036
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05302037 DBG_ERROR("sxg: %s (%s) ENTER states[%d:%d] flags[%x]\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07002038 __func__, adapter->netdev->name,
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05302039 adapter->state,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002040 adapter->linkstate, dev->flags);
2041
2042 /* adapter should be down at this point */
2043 if (adapter->state != ADAPT_DOWN) {
2044 DBG_ERROR("sxg_if_init adapter->state != ADAPT_DOWN\n");
2045 return (-EIO);
2046 }
2047 ASSERT(adapter->linkstate == LINK_DOWN);
2048
2049 adapter->devflags_prev = dev->flags;
Mithlesh Thukralb040b072009-01-28 07:08:11 +05302050 adapter->MacFilter = MAC_DIRECTED;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002051 if (dev->flags) {
Harvey Harrisone88bd232008-10-17 14:46:10 -07002052 DBG_ERROR("sxg: %s (%s) Set MAC options: ", __func__,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002053 adapter->netdev->name);
2054 if (dev->flags & IFF_BROADCAST) {
Mithlesh Thukralb040b072009-01-28 07:08:11 +05302055 adapter->MacFilter |= MAC_BCAST;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002056 DBG_ERROR("BCAST ");
2057 }
2058 if (dev->flags & IFF_PROMISC) {
Mithlesh Thukralb040b072009-01-28 07:08:11 +05302059 adapter->MacFilter |= MAC_PROMISC;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002060 DBG_ERROR("PROMISC ");
2061 }
2062 if (dev->flags & IFF_ALLMULTI) {
Mithlesh Thukralb040b072009-01-28 07:08:11 +05302063 adapter->MacFilter |= MAC_ALLMCAST;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002064 DBG_ERROR("ALL_MCAST ");
2065 }
2066 if (dev->flags & IFF_MULTICAST) {
Mithlesh Thukralb040b072009-01-28 07:08:11 +05302067 adapter->MacFilter |= MAC_MCAST;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002068 DBG_ERROR("MCAST ");
2069 }
2070 DBG_ERROR("\n");
2071 }
Mithlesh Thukral1782199f2009-02-06 19:32:28 +05302072 status = sxg_register_intr(adapter);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002073 if (status != STATUS_SUCCESS) {
Mithlesh Thukral1782199f2009-02-06 19:32:28 +05302074 DBG_ERROR("sxg_if_init: sxg_register_intr FAILED %x\n",
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002075 status);
2076 sxg_deregister_interrupt(adapter);
2077 return (status);
2078 }
2079
2080 adapter->state = ADAPT_UP;
2081
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302082 /* clear any pending events, then enable interrupts */
Harvey Harrisone88bd232008-10-17 14:46:10 -07002083 DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002084
2085 return (STATUS_SUCCESS);
2086}
2087
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05302088void sxg_set_interrupt_aggregation(struct adapter_t *adapter)
2089{
2090 /*
2091 * Top bit disables aggregation on xmt (SXG_AGG_XMT_DISABLE).
2092 * Make sure Max is less than 0x8000.
2093 */
2094 adapter->max_aggregation = SXG_MAX_AGG_DEFAULT;
2095 adapter->min_aggregation = SXG_MIN_AGG_DEFAULT;
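	/*
	 * Both thresholds are packed into a single register write: max in
	 * the upper bits (shifted by SXG_MAX_AGG_SHIFT), min in the low bits.
	 */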
2096 WRITE_REG(adapter->UcodeRegs[0].Aggregation,
2097 ((adapter->max_aggregation << SXG_MAX_AGG_SHIFT) |
2098 adapter->min_aggregation),
2099 TRUE);
2100}
2101
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302102static int sxg_entry_open(struct net_device *dev)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002103{
J.R. Mauro73b07062008-10-28 18:42:02 -04002104 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002105 int status;
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302106 static int turn;
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05302107 int sxg_initial_rcv_data_buffers = SXG_INITIAL_RCV_DATA_BUFFERS;
2108 int i;
2109
2110 if (adapter->JumboEnabled == TRUE) {
2111 sxg_initial_rcv_data_buffers =
2112 SXG_INITIAL_JUMBO_RCV_DATA_BUFFERS;
2113 SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo,
2114 SXG_JUMBO_RCV_RING_SIZE);
2115 }
2116
2117 /*
2118 * Allocate receive data buffers. We allocate a block of buffers and
2119 * a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK
2120 */
2121
2122 for (i = 0; i < sxg_initial_rcv_data_buffers;
2123 i += SXG_RCV_DESCRIPTORS_PER_BLOCK)
2124 {
2125 status = sxg_allocate_buffer_memory(adapter,
2126 SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
2127 SXG_BUFFER_TYPE_RCV);
2128 if (status != STATUS_SUCCESS)
2129 return status;
2130 }
2131 /*
2132 * NBL resource allocation can fail in the 'AllocateComplete' routine,
2133 * which doesn't return status. Make sure we got the number of buffers
2134 * we requested
2135 */
2136
2137 if (adapter->FreeRcvBufferCount < sxg_initial_rcv_data_buffers) {
2138 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
2139 adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES,
2140 0);
2141 return (STATUS_RESOURCES);
2142 }
2143 /*
2144 * The microcode expects it to be downloaded on every open.
2145 */
2146	DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __func__);
2147	if (sxg_download_microcode(adapter, SXG_UCODE_SAHARA)) {
2148		DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
2149							__func__);
2150 sxg_read_config(adapter);
2151 } else {
2152 adapter->state = ADAPT_FAIL;
2153 adapter->linkstate = LINK_DOWN;
2154		DBG_ERROR("sxg_download_microcode FAILED\n");
2156 }
2157 msleep(5);
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302158
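	/*
	 * Full adapter bring-up is only done on the first open; subsequent
	 * opens take the lighter sxg_second_open() path.
	 */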
2159 if (turn) {
2160 sxg_second_open(adapter->netdev);
2161
2162 return STATUS_SUCCESS;
2163 }
2164
2165 turn++;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002166
2167 ASSERT(adapter);
Harvey Harrisone88bd232008-10-17 14:46:10 -07002168 DBG_ERROR("sxg: %s adapter->activated[%d]\n", __func__,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002169 adapter->activated);
2170 DBG_ERROR
2171 ("sxg: %s (%s): [jiffies[%lx] cpu %d] dev[%p] adapt[%p] port[%d]\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07002172 __func__, adapter->netdev->name, jiffies, smp_processor_id(),
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002173 adapter->netdev, adapter, adapter->port);
2174
2175 netif_stop_queue(adapter->netdev);
2176
2177 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
2178 if (!adapter->activated) {
2179 sxg_global.num_sxg_ports_active++;
2180 adapter->activated = 1;
2181 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04002182 /* Initialize the adapter */
Harvey Harrisone88bd232008-10-17 14:46:10 -07002183 DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002184 status = sxg_initialize_adapter(adapter);
2185 DBG_ERROR("sxg: %s EXIT sxg_initialize_adapter status[%x]\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07002186 __func__, status);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002187
2188 if (status == STATUS_SUCCESS) {
Harvey Harrisone88bd232008-10-17 14:46:10 -07002189 DBG_ERROR("sxg: %s ENTER sxg_if_init\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002190 status = sxg_if_init(adapter);
Harvey Harrisone88bd232008-10-17 14:46:10 -07002191 DBG_ERROR("sxg: %s EXIT sxg_if_init status[%x]\n", __func__,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002192 status);
2193 }
2194
2195 if (status != STATUS_SUCCESS) {
2196 if (adapter->activated) {
2197 sxg_global.num_sxg_ports_active--;
2198 adapter->activated = 0;
2199 }
2200 spin_unlock_irqrestore(&sxg_global.driver_lock,
2201 sxg_global.flags);
2202 return (status);
2203 }
Harvey Harrisone88bd232008-10-17 14:46:10 -07002204 DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __func__);
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05302205 sxg_set_interrupt_aggregation(adapter);
2206 napi_enable(&adapter->napi);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002207
J.R. Maurob243c4a2008-10-20 19:28:58 -04002208 /* Enable interrupts */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002209 SXG_ENABLE_ALL_INTERRUPTS(adapter);
2210
Harvey Harrisone88bd232008-10-17 14:46:10 -07002211 DBG_ERROR("sxg: %s EXIT\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002212
2213 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
2214 return STATUS_SUCCESS;
2215}
2216
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302217int sxg_second_open(struct net_device * dev)
2218{
2219 struct adapter_t *adapter = (struct adapter_t*) netdev_priv(dev);
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05302220 int status = 0;
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302221
2222 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
2223 netif_start_queue(adapter->netdev);
2224 adapter->state = ADAPT_UP;
2225 adapter->linkstate = LINK_UP;
2226
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05302227 status = sxg_initialize_adapter(adapter);
2228 sxg_set_interrupt_aggregation(adapter);
2229 napi_enable(&adapter->napi);
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302230 /* Re-enable interrupts */
2231 SXG_ENABLE_ALL_INTERRUPTS(adapter);
2232
2233 netif_carrier_on(dev);
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302234 sxg_register_interrupt(adapter);
Mithlesh Thukral1782199f2009-02-06 19:32:28 +05302235 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302236 return (STATUS_SUCCESS);
2237
2238}
2239
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002240static void __devexit sxg_entry_remove(struct pci_dev *pcidev)
2241{
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302242 u32 mmio_start = 0;
2243 u32 mmio_len = 0;
2244
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302245 struct net_device *dev = pci_get_drvdata(pcidev);
J.R. Mauro73b07062008-10-28 18:42:02 -04002246 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05302247
2248 flush_scheduled_work();
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05302249
2250 /* Deallocate Resources */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302251 unregister_netdev(dev);
Mithlesh Thukral1782199f2009-02-06 19:32:28 +05302252 sxg_reset_interrupt_capability(adapter);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302253 sxg_free_resources(adapter);
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05302254
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002255 ASSERT(adapter);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002256
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302257 mmio_start = pci_resource_start(pcidev, 0);
2258 mmio_len = pci_resource_len(pcidev, 0);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002259
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302260	DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __func__,
2261 mmio_start, mmio_len);
2262 release_mem_region(mmio_start, mmio_len);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002263
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05302264 mmio_start = pci_resource_start(pcidev, 2);
2265 mmio_len = pci_resource_len(pcidev, 2);
2266
2267	DBG_ERROR("sxg: %s rel_region(2) start[%x] len[%x]\n", __func__,
2268 mmio_start, mmio_len);
2269 release_mem_region(mmio_start, mmio_len);
2270
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05302271 pci_disable_device(pcidev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002272
Harvey Harrisone88bd232008-10-17 14:46:10 -07002273 DBG_ERROR("sxg: %s deallocate device\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002274 kfree(dev);
Harvey Harrisone88bd232008-10-17 14:46:10 -07002275 DBG_ERROR("sxg: %s EXIT\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002276}
2277
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302278static int sxg_entry_halt(struct net_device *dev)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002279{
J.R. Mauro73b07062008-10-28 18:42:02 -04002280 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05302281 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
2282 int i;
2283 u32 RssIds, IsrCount;
2284 unsigned long flags;
2285
2286 RssIds = SXG_RSS_CPU_COUNT(adapter);
Mithlesh Thukral1782199f2009-02-06 19:32:28 +05302287 IsrCount = adapter->msi_enabled ? RssIds : 1;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002288
Mithlesh Thukralb62a2942009-01-30 20:19:03 +05302289 napi_disable(&adapter->napi);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002290 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
Harvey Harrisone88bd232008-10-17 14:46:10 -07002291 DBG_ERROR("sxg: %s (%s) ENTER\n", __func__, dev->name);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002292
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05302293 WRITE_REG(adapter->UcodeRegs[0].RcvCmd, 0, true);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002294 netif_stop_queue(adapter->netdev);
2295 adapter->state = ADAPT_DOWN;
2296 adapter->linkstate = LINK_DOWN;
2297 adapter->devflags_prev = 0;
2298 DBG_ERROR("sxg: %s (%s) set adapter[%p] state to ADAPT_DOWN(%d)\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07002299 __func__, dev->name, adapter, adapter->state);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002300
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05302301 /* Disable interrupts */
2302 SXG_DISABLE_ALL_INTERRUPTS(adapter);
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05302303
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302304 netif_carrier_off(dev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002305 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05302306
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302307 sxg_deregister_interrupt(adapter);
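	/*
	 * Reset the card and give it time to quiesce before the receive and
	 * transmit state below is torn down and reinitialized.
	 */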
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05302308 WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);
2309 mdelay(5000);
2310 spin_lock(&adapter->RcvQLock);
2311 /* Free all the blocks and the buffers, moved from remove() routine */
2312 if (!(IsListEmpty(&adapter->AllRcvBlocks))) {
2313 sxg_free_rcvblocks(adapter);
2314 }
2315
2316
2317 InitializeListHead(&adapter->FreeRcvBuffers);
2318 InitializeListHead(&adapter->FreeRcvBlocks);
2319 InitializeListHead(&adapter->AllRcvBlocks);
2320 InitializeListHead(&adapter->FreeSglBuffers);
2321 InitializeListHead(&adapter->AllSglBuffers);
2322
2323 adapter->FreeRcvBufferCount = 0;
2324 adapter->FreeRcvBlockCount = 0;
2325 adapter->AllRcvBlockCount = 0;
2326 adapter->RcvBuffersOnCard = 0;
2327 adapter->PendingRcvCount = 0;
2328
2329 memset(adapter->RcvRings, 0, sizeof(struct sxg_rcv_ring) * 1);
2330 memset(adapter->EventRings, 0, sizeof(struct sxg_event_ring) * RssIds);
2331 memset(adapter->Isr, 0, sizeof(u32) * IsrCount);
2332 for (i = 0; i < SXG_MAX_RING_SIZE; i++)
2333 adapter->RcvRingZeroInfo.Context[i] = NULL;
2334 SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
2335 SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);
2336
2337 spin_unlock(&adapter->RcvQLock);
2338
2339 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
2340 adapter->AllSglBufferCount = 0;
2341 adapter->FreeSglBufferCount = 0;
2342 adapter->PendingXmtCount = 0;
2343 memset(adapter->XmtRings, 0, sizeof(struct sxg_xmt_ring) * 1);
2344 memset(adapter->XmtRingZeroIndex, 0, sizeof(u32));
2345 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2346
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05302347 for (i = 0; i < SXG_MAX_RSS; i++) {
2348 adapter->NextEvent[i] = 0;
2349 }
2350 atomic_set(&adapter->pending_allocations, 0);
Mithlesh Thukral1782199f2009-02-06 19:32:28 +05302351 adapter->intrregistered = 0;
2352 sxg_remove_isr(adapter);
2353	DBG_ERROR("sxg: %s (%s) EXIT\n", __func__, dev->name);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002354 return (STATUS_SUCCESS);
2355}
2356
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302357static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002358{
2359 ASSERT(rq);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302360/* DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __func__, cmd, rq, dev);*/
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002361 switch (cmd) {
2362 case SIOCSLICSETINTAGG:
2363 {
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302364 /* struct adapter_t *adapter = (struct adapter_t *)
2365 * netdev_priv(dev);
2366 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002367 u32 data[7];
2368 u32 intagg;
2369
2370 if (copy_from_user(data, rq->ifr_data, 28)) {
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302371				DBG_ERROR("copy_from_user FAILED getting "
2372						  "initial params\n");
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002373 return -EFAULT;
2374 }
2375 intagg = data[0];
2376 printk(KERN_EMERG
2377 "%s: set interrupt aggregation to %d\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07002378 __func__, intagg);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002379 return 0;
2380 }
2381
2382 default:
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302383 /* DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __func__, cmd); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002384 return -EOPNOTSUPP;
2385 }
2386 return 0;
2387}
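
/*
 * Illustration only (not part of the driver): a rough sketch of how a
 * userspace tool might exercise the SIOCSLICSETINTAGG ioctl handled above.
 * It assumes no more than the handler shows - 28 bytes (seven 32-bit words)
 * are copied in and data[0] is taken as the interrupt aggregation value; the
 * meaning of the remaining six words is not defined here, the value 250 is
 * just an example, and the SIOCSLICSETINTAGG number itself must come from
 * the driver's headers.
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct ifreq ifr = { };
 *	uint32_t data[7] = { 0 };
 *
 *	data[0] = 250;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (char *)data;
 *	ioctl(fd, SIOCSLICSETINTAGG, &ifr);
 */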
2388
2389#define NORMAL_ETHFRAME 0
2390
2391/*
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002392 * sxg_send_packets - Send a skb packet
2393 *
2394 * Arguments:
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302395 * skb - The packet to send
2396 * dev - Our linux net device that refs our adapter
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002397 *
2398 * Return:
2399 *	NETDEV_TX_OK on success, NETDEV_TX_BUSY if the packet was not sent
2400 */
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302401static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002402{
J.R. Mauro73b07062008-10-28 18:42:02 -04002403 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002404 u32 status = STATUS_SUCCESS;
2405
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302406 /*
2407 * DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __FUNCTION__,
2408 * skb);
2409 */
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05302410
J.R. Maurob243c4a2008-10-20 19:28:58 -04002411 /* Check the adapter state */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002412 switch (adapter->State) {
2413 case SXG_STATE_INITIALIZING:
2414 case SXG_STATE_HALTED:
2415 case SXG_STATE_SHUTDOWN:
J.R. Maurob243c4a2008-10-20 19:28:58 -04002416 ASSERT(0); /* unexpected */
2417 /* fall through */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002418 case SXG_STATE_RESETTING:
2419 case SXG_STATE_SLEEP:
2420 case SXG_STATE_BOOTDIAG:
2421 case SXG_STATE_DIAG:
2422 case SXG_STATE_HALTING:
2423 status = STATUS_FAILURE;
2424 break;
2425 case SXG_STATE_RUNNING:
2426 if (adapter->LinkState != SXG_LINK_UP) {
2427 status = STATUS_FAILURE;
2428 }
2429 break;
2430 default:
2431 ASSERT(0);
2432 status = STATUS_FAILURE;
2433 }
2434 if (status != STATUS_SUCCESS) {
2435 goto xmit_fail;
2436 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04002437 /* send a packet */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002438 status = sxg_transmit_packet(adapter, skb);
2439 if (status == STATUS_SUCCESS) {
2440 goto xmit_done;
2441 }
2442
2443 xmit_fail:
J.R. Maurob243c4a2008-10-20 19:28:58 -04002444 /* reject & complete all the packets if they cant be sent */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002445 if (status != STATUS_SUCCESS) {
2446#if XXXTODO
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302447 /* sxg_send_packets_fail(adapter, skb, status); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002448#else
2449 SXG_DROP_DUMB_SEND(adapter, skb);
2450 adapter->stats.tx_dropped++;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302451 return NETDEV_TX_BUSY;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002452#endif
2453 }
Harvey Harrisone88bd232008-10-17 14:46:10 -07002454 DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __func__,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002455 status);
2456
2457 xmit_done:
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302458 return NETDEV_TX_OK;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002459}
2460
2461/*
2462 * sxg_transmit_packet
2463 *
2464 * This function transmits a single packet.
2465 *
2466 * Arguments -
2467 * adapter - Pointer to our adapter structure
2468 * skb - The packet to be sent
2469 *
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302470 * Return - STATUS of send
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002471 */
J.R. Mauro73b07062008-10-28 18:42:02 -04002472static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002473{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302474 struct sxg_x64_sgl *pSgl;
2475 struct sxg_scatter_gather *SxgSgl;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302476 unsigned long sgl_flags;
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05302477 /* void *SglBuffer; */
2478 /* u32 SglBufferLength; */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002479
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302480 /*
2481 * The vast majority of work is done in the shared
2482 * sxg_dumb_sgl routine.
2483 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002484 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSend",
2485 adapter, skb, 0, 0);
2486
J.R. Maurob243c4a2008-10-20 19:28:58 -04002487 /* Allocate a SGL buffer */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302488 SXG_GET_SGL_BUFFER(adapter, SxgSgl, 0);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002489 if (!SxgSgl) {
2490 adapter->Stats.NoSglBuf++;
Mithlesh Thukral54aed112009-01-19 20:27:17 +05302491 adapter->stats.tx_errors++;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002492 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "SndPktF1",
2493 adapter, skb, 0, 0);
2494 return (STATUS_RESOURCES);
2495 }
2496 ASSERT(SxgSgl->adapter == adapter);
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05302497 /*SglBuffer = SXG_SGL_BUFFER(SxgSgl);
2498 SglBufferLength = SXG_SGL_BUF_SIZE; */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002499 SxgSgl->VlanTag.VlanTci = 0;
2500 SxgSgl->VlanTag.VlanTpid = 0;
2501 SxgSgl->Type = SXG_SGL_DUMB;
2502 SxgSgl->DumbPacket = skb;
2503 pSgl = NULL;
2504
J.R. Maurob243c4a2008-10-20 19:28:58 -04002505 /* Call the common sxg_dumb_sgl routine to complete the send. */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302506 return (sxg_dumb_sgl(pSgl, SxgSgl));
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002507}
2508
2509/*
2510 * sxg_dumb_sgl
2511 *
2512 * Arguments:
2513 * pSgl -
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302514 * SxgSgl - struct sxg_scatter_gather
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002515 *
2516 * Return Value:
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302517 * Status of send operation.
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002518 */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302519static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302520 struct sxg_scatter_gather *SxgSgl)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002521{
J.R. Mauro73b07062008-10-28 18:42:02 -04002522 struct adapter_t *adapter = SxgSgl->adapter;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002523 struct sk_buff *skb = SxgSgl->DumbPacket;
J.R. Maurob243c4a2008-10-20 19:28:58 -04002524 /* For now, all dumb-nic sends go on RSS queue zero */
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302525 struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
2526 struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
2527 struct sxg_cmd *XmtCmd = NULL;
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302528 /* u32 Index = 0; */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002529 u32 DataLength = skb->len;
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302530 /* unsigned int BufLen; */
2531 /* u32 SglOffset; */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002532 u64 phys_addr;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302533 unsigned long flags;
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302534	unsigned long queue_id = 0;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002535
2536 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl",
2537 pSgl, SxgSgl, 0, 0);
2538
J.R. Maurob243c4a2008-10-20 19:28:58 -04002539 /* Set aside a pointer to the sgl */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002540 SxgSgl->pSgl = pSgl;
2541
J.R. Maurob243c4a2008-10-20 19:28:58 -04002542 /* Sanity check that our SGL format is as we expect. */
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302543 ASSERT(sizeof(struct sxg_x64_sge) == sizeof(struct sxg_x64_sge));
J.R. Maurob243c4a2008-10-20 19:28:58 -04002544 /* Shouldn't be a vlan tag on this frame */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002545 ASSERT(SxgSgl->VlanTag.VlanTci == 0);
2546 ASSERT(SxgSgl->VlanTag.VlanTpid == 0);
2547
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302548 /*
2549 * From here below we work with the SGL placed in our
2550 * buffer.
2551 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002552
2553 SxgSgl->Sgl.NumberOfElements = 1;
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302554 /*
2555 * Set ucode Queue ID based on bottom bits of destination TCP port.
2556 * This Queue ID splits slowpath/dumb-nic packet processing across
2557 * multiple threads on the card to improve performance. It is split
2558 * using the TCP port to avoid out-of-order packets that can result
2559 * from multithreaded processing. We use the destination port because
2560 * we expect to be run on a server, so in nearly all cases the local
2561 * port is likely to be constant (well-known server port) and the
2562 * remote port is likely to be random. The exception to this is iSCSI,
2563 * in which case we use the sport instead. Note
2564	 * in which case we use the source port instead. Note
2565	 * that the original attempt at XOR'ing source and dest port resulted in
2566 * line up (even-even, odd-odd..).
2567 */
2568
2569 if (skb->protocol == htons(ETH_P_IP)) {
2570 struct iphdr *ip;
2571
2572 ip = ip_hdr(skb);
2573 if ((ip->protocol == IPPROTO_TCP)&&(DataLength >= sizeof(
2574 struct tcphdr))){
2575 queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ?
2576 (ntohs (tcp_hdr(skb)->source) &
2577 SXG_LARGE_SEND_QUEUE_MASK):
2578 (ntohs(tcp_hdr(skb)->dest) &
2579 SXG_LARGE_SEND_QUEUE_MASK));
2580 }
2581 } else if (skb->protocol == htons(ETH_P_IPV6)) {
Mithlesh Thukral9914f052009-02-18 18:51:29 +05302582 if ((ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) && (DataLength >=
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302583 sizeof(struct tcphdr)) ) {
2584 queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ?
2585 (ntohs (tcp_hdr(skb)->source) &
2586 SXG_LARGE_SEND_QUEUE_MASK):
2587 (ntohs(tcp_hdr(skb)->dest) &
2588 SXG_LARGE_SEND_QUEUE_MASK));
2589 }
2590 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002591
J.R. Maurob243c4a2008-10-20 19:28:58 -04002592 /* Grab the spinlock and acquire a command */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302593 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002594 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2595 if (XmtCmd == NULL) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302596 /*
2597 * Call sxg_complete_slow_send to see if we can
2598 * free up any XmtRingZero entries and then try again
2599 */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302600
2601 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
Mithlesh Thukralc5e5cf52009-02-06 19:31:40 +05302602 sxg_complete_slow_send(adapter);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302603 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002604 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2605 if (XmtCmd == NULL) {
2606 adapter->Stats.XmtZeroFull++;
2607 goto abortcmd;
2608 }
2609 }
2610 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd",
2611 XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
J.R. Maurob243c4a2008-10-20 19:28:58 -04002612 /* Update stats */
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05302613 adapter->stats.tx_packets++;
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05302614 adapter->stats.tx_bytes += DataLength;
J.R. Maurob243c4a2008-10-20 19:28:58 -04002615#if XXXTODO /* Stats stuff */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002616 if (SXG_MULTICAST_PACKET(EtherHdr)) {
2617 if (SXG_BROADCAST_PACKET(EtherHdr)) {
2618 adapter->Stats.DumbXmtBcastPkts++;
2619 adapter->Stats.DumbXmtBcastBytes += DataLength;
2620 } else {
2621 adapter->Stats.DumbXmtMcastPkts++;
2622 adapter->Stats.DumbXmtMcastBytes += DataLength;
2623 }
2624 } else {
2625 adapter->Stats.DumbXmtUcastPkts++;
2626 adapter->Stats.DumbXmtUcastBytes += DataLength;
2627 }
2628#endif
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302629 /*
2630 * Fill in the command
2631 * Copy out the first SGE to the command and adjust for offset
2632 */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302633 phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
J.R. Mauro5c7514e2008-10-05 20:38:52 -04002634 PCI_DMA_TODEVICE);
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05302635
2636 /*
2637 * SAHARA SGL WORKAROUND
2638 * See if the SGL straddles a 64k boundary. If so, skip to
2639 * the start of the next 64k boundary and continue
2640 */
2641
2642 if (SXG_INVALID_SGL(phys_addr,skb->data_len))
2643 {
2644 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2645 /* Silently drop this packet */
2646		printk(KERN_EMERG "Dropped a packet for 64k boundary problem\n");
2647 return STATUS_SUCCESS;
2648 }
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05302649 memset(XmtCmd, '\0', sizeof(*XmtCmd));
2650 XmtCmd->Buffer.FirstSgeAddress = phys_addr;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002651 XmtCmd->Buffer.FirstSgeLength = DataLength;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002652 XmtCmd->Buffer.SgeOffset = 0;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002653 XmtCmd->Buffer.TotalLength = DataLength;
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05302654 XmtCmd->SgEntries = 1;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002655 XmtCmd->Flags = 0;
Mithlesh Thukral9914f052009-02-18 18:51:29 +05302656
2657 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2658 /*
2659		 * We need to set the checksum in the IP header to 0. This is
2660 * required by hardware.
2661 */
2662 ip_hdr(skb)->check = 0x0;
2663 XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_IP;
2664 XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_TCP;
2665		/* Don't know if length will require a change in case of VLAN */
2666 XmtCmd->CsumFlags.MacLen = ETH_HLEN;
2667 XmtCmd->CsumFlags.IpHl = skb_network_header_len(skb) >>
2668 SXG_NW_HDR_LEN_SHIFT;
2669 }
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302670 /*
2671	 * Advance transmit cmd descriptor by 1.
2672 * NOTE - See comments in SxgTcpOutput where we write
2673 * to the XmtCmd register regarding CPU ID values and/or
2674 * multiple commands.
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302675 * Top 16 bits specify queue_id. See comments about queue_id above
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302676 */
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302677 /* Four queues at the moment */
2678 ASSERT((queue_id & ~SXG_LARGE_SEND_QUEUE_MASK) == 0);
2679 WRITE_REG(adapter->UcodeRegs[0].XmtCmd, ((queue_id << 16) | 1), TRUE);
J.R. Maurob243c4a2008-10-20 19:28:58 -04002680 adapter->Stats.XmtQLen++; /* Stats within lock */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302681 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002682 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2",
2683 XmtCmd, pSgl, SxgSgl, 0);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302684 return STATUS_SUCCESS;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002685
2686 abortcmd:
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302687 /*
2688 * NOTE - Only jump to this label AFTER grabbing the
2689 * XmtZeroLock, and DO NOT DROP IT between the
2690 * command allocation and the following abort.
2691 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002692 if (XmtCmd) {
2693 SXG_ABORT_CMD(XmtRingInfo);
2694 }
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302695 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002696
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302697/*
2698 * failsgl:
2699 * Jump to this label if failure occurs before the
2700 * XmtZeroLock is grabbed
2701 */
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05302702 adapter->stats.tx_errors++;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002703 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal",
2704 pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302705 /* SxgSgl->DumbPacket is the skb */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05302706 // SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket);
Mithlesh Thukral54aed112009-01-19 20:27:17 +05302707
2708 return STATUS_FAILURE;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002709}
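
/*
 * Illustration only: a minimal sketch of the 64k boundary test that the
 * SAHARA SGL WORKAROUND in sxg_dumb_sgl() relies on.  The real check is the
 * SXG_INVALID_SGL() macro from the driver headers, which may be stricter;
 * this helper simply assumes "invalid" means that the mapped buffer
 * [addr, addr + len) crosses a 64 KB boundary.
 */
static inline int sxg_sketch_sgl_crosses_64k(u64 addr, u32 len)
{
	/* Compare the 64 KB "page" of the first and the last byte */
	return len && ((addr >> 16) != ((addr + len - 1) >> 16));
}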
2710
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002711/*
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302712 * Link management functions
2713 *
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002714 * sxg_initialize_link - Initialize the link stuff
2715 *
2716 * Arguments -
2717 * adapter - A pointer to our adapter structure
2718 *
2719 * Return
2720 * status
2721 */
J.R. Mauro73b07062008-10-28 18:42:02 -04002722static int sxg_initialize_link(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002723{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302724 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002725 u32 Value;
2726 u32 ConfigData;
2727 u32 MaxFrame;
2728 int status;
2729
2730 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitLink",
2731 adapter, 0, 0, 0);
2732
J.R. Maurob243c4a2008-10-20 19:28:58 -04002733 /* Reset PHY and XGXS module */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002734 WRITE_REG(HwRegs->LinkStatus, LS_SERDES_POWER_DOWN, TRUE);
2735
J.R. Maurob243c4a2008-10-20 19:28:58 -04002736 /* Reset transmit configuration register */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002737 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_RESET, TRUE);
2738
J.R. Maurob243c4a2008-10-20 19:28:58 -04002739 /* Reset receive configuration register */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002740 WRITE_REG(HwRegs->RcvConfig, RCV_CONFIG_RESET, TRUE);
2741
J.R. Maurob243c4a2008-10-20 19:28:58 -04002742 /* Reset all MAC modules */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002743 WRITE_REG(HwRegs->MacConfig0, AXGMAC_CFG0_SUB_RESET, TRUE);
2744
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302745 /*
2746 * Link address 0
2747 * XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f)
2748	 * is stored with the first byte (0a) in byte 0
2749 * of the Mac address. Possibly reverse?
2750 */
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05302751 Value = *(u32 *) adapter->macaddr;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002752 WRITE_REG(HwRegs->LinkAddress0Low, Value, TRUE);
J.R. Maurob243c4a2008-10-20 19:28:58 -04002753 /* also write the MAC address to the MAC. Endian is reversed. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002754 WRITE_REG(HwRegs->MacAddressLow, ntohl(Value), TRUE);
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05302755 Value = (*(u16 *) & adapter->macaddr[4] & 0x0000FFFF);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002756 WRITE_REG(HwRegs->LinkAddress0High, Value | LINK_ADDRESS_ENABLE, TRUE);
J.R. Maurob243c4a2008-10-20 19:28:58 -04002757 /* endian swap for the MAC (put high bytes in bits [31:16], swapped) */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002758 Value = ntohl(Value);
2759 WRITE_REG(HwRegs->MacAddressHigh, Value, TRUE);
J.R. Maurob243c4a2008-10-20 19:28:58 -04002760 /* Link address 1 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002761 WRITE_REG(HwRegs->LinkAddress1Low, 0, TRUE);
2762 WRITE_REG(HwRegs->LinkAddress1High, 0, TRUE);
J.R. Maurob243c4a2008-10-20 19:28:58 -04002763 /* Link address 2 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002764 WRITE_REG(HwRegs->LinkAddress2Low, 0, TRUE);
2765 WRITE_REG(HwRegs->LinkAddress2High, 0, TRUE);
J.R. Maurob243c4a2008-10-20 19:28:58 -04002766 /* Link address 3 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002767 WRITE_REG(HwRegs->LinkAddress3Low, 0, TRUE);
2768 WRITE_REG(HwRegs->LinkAddress3High, 0, TRUE);
2769
J.R. Maurob243c4a2008-10-20 19:28:58 -04002770 /* Enable MAC modules */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002771 WRITE_REG(HwRegs->MacConfig0, 0, TRUE);
2772
J.R. Maurob243c4a2008-10-20 19:28:58 -04002773 /* Configure MAC */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302774 WRITE_REG(HwRegs->MacConfig1, (
2775 /* Allow sending of pause */
2776 AXGMAC_CFG1_XMT_PAUSE |
2777 /* Enable XMT */
2778 AXGMAC_CFG1_XMT_EN |
2779 /* Enable detection of pause */
2780 AXGMAC_CFG1_RCV_PAUSE |
2781 /* Enable receive */
2782 AXGMAC_CFG1_RCV_EN |
2783 /* short frame detection */
2784 AXGMAC_CFG1_SHORT_ASSERT |
2785 /* Verify frame length */
2786 AXGMAC_CFG1_CHECK_LEN |
2787 /* Generate FCS */
2788 AXGMAC_CFG1_GEN_FCS |
2789 /* Pad frames to 64 bytes */
2790 AXGMAC_CFG1_PAD_64),
2791 TRUE);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002792
J.R. Maurob243c4a2008-10-20 19:28:58 -04002793 /* Set AXGMAC max frame length if jumbo. Not needed for standard MTU */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002794 if (adapter->JumboEnabled) {
2795 WRITE_REG(HwRegs->MacMaxFrameLen, AXGMAC_MAXFRAME_JUMBO, TRUE);
2796 }
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302797 /*
2798 * AMIIM Configuration Register -
2799 * The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion
2800 * (bottom bits) of this register is used to determine the MDC frequency
2801 * as specified in the A-XGMAC Design Document. This value must not be
2802 * zero. The following value (62 or 0x3E) is based on our MAC transmit
2803 * clock frequency (MTCLK) of 312.5 MHz. Given a maximum MDIO clock
2804 * frequency of 2.5 MHz (see the PHY spec), we get:
2805 * 312.5/(2*(X+1)) < 2.5 ==> X = 62.
2806 * This value happens to be the default value for this register, so we
2807 * really don't have to do this.
2808 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002809 WRITE_REG(HwRegs->MacAmiimConfig, 0x0000003E, TRUE);
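	/*
	 * Worked out for illustration: with X = 62 the MDC rate is
	 * 312.5 MHz / (2 * (62 + 1)) ~= 2.48 MHz, just under the 2.5 MHz
	 * limit, while X = 61 would give ~2.52 MHz and exceed it.
	 */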
2810
J.R. Maurob243c4a2008-10-20 19:28:58 -04002811 /* Power up and enable PHY and XAUI/XGXS/Serdes logic */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002812 WRITE_REG(HwRegs->LinkStatus,
2813 (LS_PHY_CLR_RESET |
2814 LS_XGXS_ENABLE |
2815 LS_XGXS_CTL | LS_PHY_CLK_EN | LS_ATTN_ALARM), TRUE);
2816 DBG_ERROR("After Power Up and enable PHY in sxg_initialize_link\n");
2817
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302818 /*
2819 * Per information given by Aeluros, wait 100 ms after removing reset.
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302820 * It's not enough to wait for the self-clearing reset bit in reg 0 to
2821 * clear.
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302822 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002823 mdelay(100);
2824
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302825 /* Verify the PHY has come up by checking that the Reset bit has
2826 * cleared.
2827 */
2828 status = sxg_read_mdio_reg(adapter,
2829 MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2830 PHY_PMA_CONTROL1, /* PMA/PMD control register */
2831 &Value);
2832 DBG_ERROR("After sxg_read_mdio_reg Value[%x] fail=%x\n", Value,
2833 (Value & PMA_CONTROL1_RESET));
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002834 if (status != STATUS_SUCCESS)
2835 return (STATUS_FAILURE);
J.R. Maurob243c4a2008-10-20 19:28:58 -04002836 if (Value & PMA_CONTROL1_RESET) /* reset complete if bit is 0 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002837 return (STATUS_FAILURE);
2838
J.R. Maurob243c4a2008-10-20 19:28:58 -04002839 /* The SERDES should be initialized by now - confirm */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002840 READ_REG(HwRegs->LinkStatus, Value);
J.R. Maurob243c4a2008-10-20 19:28:58 -04002841 if (Value & LS_SERDES_DOWN) /* verify SERDES is initialized */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002842 return (STATUS_FAILURE);
2843
J.R. Maurob243c4a2008-10-20 19:28:58 -04002844 /* The XAUI link should also be up - confirm */
2845 if (!(Value & LS_XAUI_LINK_UP)) /* verify XAUI link is up */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002846 return (STATUS_FAILURE);
2847
J.R. Maurob243c4a2008-10-20 19:28:58 -04002848 /* Initialize the PHY */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002849 status = sxg_phy_init(adapter);
2850 if (status != STATUS_SUCCESS)
2851 return (STATUS_FAILURE);
2852
J.R. Maurob243c4a2008-10-20 19:28:58 -04002853 /* Enable the Link Alarm */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302854
2855 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2856 * LASI_CONTROL - LASI control register
2857 * LASI_CTL_LS_ALARM_ENABLE - enable link alarm bit
2858 */
2859 status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2860 LASI_CONTROL,
2861 LASI_CTL_LS_ALARM_ENABLE);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002862 if (status != STATUS_SUCCESS)
2863 return (STATUS_FAILURE);
2864
J.R. Maurob243c4a2008-10-20 19:28:58 -04002865 /* XXXTODO - temporary - verify bit is set */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302866
2867 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2868 * LASI_CONTROL - LASI control register
2869 */
2870 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2871 LASI_CONTROL,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002872 &Value);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302873
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002874 if (status != STATUS_SUCCESS)
2875 return (STATUS_FAILURE);
2876 if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) {
2877 DBG_ERROR("Error! LASI Control Alarm Enable bit not set!\n");
2878 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04002879 /* Enable receive */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002880 MaxFrame = adapter->JumboEnabled ? JUMBOMAXFRAME : ETHERMAXFRAME;
2881 ConfigData = (RCV_CONFIG_ENABLE |
2882 RCV_CONFIG_ENPARSE |
2883 RCV_CONFIG_RCVBAD |
2884 RCV_CONFIG_RCVPAUSE |
2885 RCV_CONFIG_TZIPV6 |
2886 RCV_CONFIG_TZIPV4 |
2887 RCV_CONFIG_HASH_16 |
2888 RCV_CONFIG_SOCKET | RCV_CONFIG_BUFSIZE(MaxFrame));
2889 WRITE_REG(HwRegs->RcvConfig, ConfigData, TRUE);
2890
2891 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_ENABLE, TRUE);
2892
J.R. Maurob243c4a2008-10-20 19:28:58 -04002893 /* Mark the link as down. We'll get a link event when it comes up. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002894 sxg_link_state(adapter, SXG_LINK_DOWN);
2895
2896 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInitLnk",
2897 adapter, 0, 0, 0);
2898 return (STATUS_SUCCESS);
2899}
2900
2901/*
2902 * sxg_phy_init - Initialize the PHY
2903 *
2904 * Arguments -
2905 * adapter - A pointer to our adapter structure
2906 *
2907 * Return
2908 * status
2909 */
J.R. Mauro73b07062008-10-28 18:42:02 -04002910static int sxg_phy_init(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002911{
2912 u32 Value;
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302913 struct phy_ucode *p;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002914 int status;
2915
Harvey Harrisone88bd232008-10-17 14:46:10 -07002916 DBG_ERROR("ENTER %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002917
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302918 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2919 * 0xC205 - PHY ID register (?)
2920 * &Value - XXXTODO - add def
2921 */
2922 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2923 0xC205,
2924 &Value);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002925 if (status != STATUS_SUCCESS)
2926 return (STATUS_FAILURE);
2927
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302928 if (Value == 0x0012) {
2929 /* 0x0012 == AEL2005C PHY(?) - XXXTODO - add def */
2930		DBG_ERROR("AEL2005C PHY detected. Downloading PHY "
2931			  "microcode.\n");
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002932
J.R. Maurob243c4a2008-10-20 19:28:58 -04002933 /* Initialize AEL2005C PHY and download PHY microcode */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002934 for (p = PhyUcode; p->Addr != 0xFFFF; p++) {
2935 if (p->Addr == 0) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04002936 /* if address == 0, data == sleep time in ms */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002937 mdelay(p->Data);
2938 } else {
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302939 /* write the given data to the specified address */
2940 status = sxg_write_mdio_reg(adapter,
2941 MIIM_DEV_PHY_PMA,
2942 /* PHY address */
2943 p->Addr,
2944 /* PHY data */
2945 p->Data);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002946 if (status != STATUS_SUCCESS)
2947 return (STATUS_FAILURE);
2948 }
2949 }
2950 }
Harvey Harrisone88bd232008-10-17 14:46:10 -07002951 DBG_ERROR("EXIT %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002952
2953 return (STATUS_SUCCESS);
2954}
2955
2956/*
2957 * sxg_link_event - Process a link event notification from the card
2958 *
2959 * Arguments -
2960 * adapter - A pointer to our adapter structure
2961 *
2962 * Return
2963 * None
2964 */
J.R. Mauro73b07062008-10-28 18:42:02 -04002965static void sxg_link_event(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002966{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05302967 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
Mithlesh Thukral0d414722009-01-19 20:29:59 +05302968 struct net_device *netdev = adapter->netdev;
J.R. Mauro73b07062008-10-28 18:42:02 -04002969 enum SXG_LINK_STATE LinkState;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002970 int status;
2971 u32 Value;
2972
2973 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "LinkEvnt",
2974 adapter, 0, 0, 0);
Harvey Harrisone88bd232008-10-17 14:46:10 -07002975 DBG_ERROR("ENTER %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002976
J.R. Maurob243c4a2008-10-20 19:28:58 -04002977 /* Check the Link Status register. We should have a Link Alarm. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002978 READ_REG(HwRegs->LinkStatus, Value);
2979 if (Value & LS_LINK_ALARM) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05302980 /*
2981 * We got a Link Status alarm. First, pause to let the
2982 * link state settle (it can bounce a number of times)
2983 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002984 mdelay(10);
2985
J.R. Maurob243c4a2008-10-20 19:28:58 -04002986 /* Now clear the alarm by reading the LASI status register. */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302987 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */
2988 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2989 /* LASI status register */
2990 LASI_STATUS,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002991 &Value);
2992 if (status != STATUS_SUCCESS) {
2993 DBG_ERROR("Error reading LASI Status MDIO register!\n");
2994 sxg_link_state(adapter, SXG_LINK_DOWN);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05302995 /* ASSERT(0); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07002996 }
2997 ASSERT(Value & LASI_STATUS_LS_ALARM);
2998
J.R. Maurob243c4a2008-10-20 19:28:58 -04002999 /* Now get and set the link state */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003000 LinkState = sxg_get_link_state(adapter);
3001 sxg_link_state(adapter, LinkState);
3002 DBG_ERROR("SXG: Link Alarm occurred. Link is %s\n",
3003 ((LinkState == SXG_LINK_UP) ? "UP" : "DOWN"));
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303004 if (LinkState == SXG_LINK_UP)
3005 netif_carrier_on(netdev);
3006 else
3007 netif_carrier_off(netdev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003008 } else {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303009 /*
3010 * XXXTODO - Assuming Link Attention is only being generated
3011		 * for the Link Alarm pin (and not for a XAUI Link Status change),
3012		 * then it's impossible to get here. Yet we've gotten here
3013 * twice (under extreme conditions - bouncing the link up and
3014 * down many times a second). Needs further investigation.
3015 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003016 DBG_ERROR("SXG: sxg_link_event: Can't get here!\n");
3017 DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303018 /* ASSERT(0); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003019 }
Harvey Harrisone88bd232008-10-17 14:46:10 -07003020 DBG_ERROR("EXIT %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003021
3022}
3023
3024/*
3025 * sxg_get_link_state - Determine if the link is up or down
3026 *
3027 * Arguments -
3028 * adapter - A pointer to our adapter structure
3029 *
3030 * Return
3031 * Link State
3032 */
J.R. Mauro73b07062008-10-28 18:42:02 -04003033static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003034{
3035 int status;
3036 u32 Value;
3037
Harvey Harrisone88bd232008-10-17 14:46:10 -07003038 DBG_ERROR("ENTER %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003039
3040 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "GetLink",
3041 adapter, 0, 0, 0);
3042
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303043 /*
3044 * Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if
3045 * the following 3 bits (from 3 different MDIO registers) are all true.
3046 */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303047
3048 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */
3049 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
3050 /* PMA/PMD Receive Signal Detect register */
3051 PHY_PMA_RCV_DET,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003052 &Value);
3053 if (status != STATUS_SUCCESS)
3054 goto bad;
3055
J.R. Maurob243c4a2008-10-20 19:28:58 -04003056 /* If PMA/PMD receive signal detect is 0, then the link is down */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003057 if (!(Value & PMA_RCV_DETECT))
3058 return (SXG_LINK_DOWN);
3059
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303060 /* MIIM_DEV_PHY_PCS - PHY PCS module */
3061 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS,
3062 /* PCS 10GBASE-R Status 1 register */
3063 PHY_PCS_10G_STATUS1,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003064 &Value);
3065 if (status != STATUS_SUCCESS)
3066 goto bad;
3067
J.R. Maurob243c4a2008-10-20 19:28:58 -04003068 /* If PCS is not locked to receive blocks, then the link is down */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003069 if (!(Value & PCS_10B_BLOCK_LOCK))
3070 return (SXG_LINK_DOWN);
3071
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303072 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS,/* PHY XS module */
3073 /* XS Lane Status register */
3074 PHY_XS_LANE_STATUS,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003075 &Value);
3076 if (status != STATUS_SUCCESS)
3077 goto bad;
3078
J.R. Maurob243c4a2008-10-20 19:28:58 -04003079 /* If XS transmit lanes are not aligned, then the link is down */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003080 if (!(Value & XS_LANE_ALIGN))
3081 return (SXG_LINK_DOWN);
3082
J.R. Maurob243c4a2008-10-20 19:28:58 -04003083 /* All 3 bits are true, so the link is up */
Harvey Harrisone88bd232008-10-17 14:46:10 -07003084 DBG_ERROR("EXIT %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003085
3086 return (SXG_LINK_UP);
3087
3088 bad:
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303089 /* An error occurred reading an MDIO register. This shouldn't happen. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003090 DBG_ERROR("Error reading an MDIO register!\n");
3091 ASSERT(0);
3092 return (SXG_LINK_DOWN);
3093}
3094
J.R. Mauro73b07062008-10-28 18:42:02 -04003095static void sxg_indicate_link_state(struct adapter_t *adapter,
3096 enum SXG_LINK_STATE LinkState)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003097{
3098 if (adapter->LinkState == SXG_LINK_UP) {
3099 DBG_ERROR("%s: LINK now UP, call netif_start_queue\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07003100 __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003101 netif_start_queue(adapter->netdev);
3102 } else {
3103 DBG_ERROR("%s: LINK now DOWN, call netif_stop_queue\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07003104 __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003105 netif_stop_queue(adapter->netdev);
3106 }
3107}
3108
3109/*
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05303110 * sxg_change_mtu - Change the Maximum Transmission Unit
3111 * @returns 0 on success, negative on failure
3112 */
3113int sxg_change_mtu (struct net_device *netdev, int new_mtu)
3114{
3115 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(netdev);
3116
3117 if (!((new_mtu == SXG_DEFAULT_MTU) || (new_mtu == SXG_JUMBO_MTU)))
3118 return -EINVAL;
3119
3120 if(new_mtu == netdev->mtu)
3121 return 0;
3122
3123 netdev->mtu = new_mtu;
3124
3125 if (new_mtu == SXG_JUMBO_MTU) {
3126 adapter->JumboEnabled = TRUE;
3127 adapter->FrameSize = JUMBOMAXFRAME;
3128 adapter->ReceiveBufferSize = SXG_RCV_JUMBO_BUFFER_SIZE;
3129 } else {
3130 adapter->JumboEnabled = FALSE;
3131 adapter->FrameSize = ETHERMAXFRAME;
3132 adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
3133 }
3134
3135 sxg_entry_halt(netdev);
3136 sxg_entry_open(netdev);
3137 return 0;
3138}
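
/*
 * Usage sketch (not from the driver): the MTU can only be toggled between
 * the two sizes accepted above, and the change bounces the interface through
 * sxg_entry_halt()/sxg_entry_open().  Assuming SXG_JUMBO_MTU corresponds to
 * the conventional 9000-byte jumbo size, switching from userspace would look
 * like:
 *
 *	ip link set dev eth0 mtu 9000
 */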
3139
3140/*
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003141 * sxg_link_state - Set the link state and if necessary, indicate.
3142 * This routine the central point of processing for all link state changes.
3143 * Nothing else in the driver should alter the link state or perform
3144 * link state indications
3145 *
3146 * Arguments -
3147 * adapter - A pointer to our adapter structure
3148 * LinkState - The link state
3149 *
3150 * Return
3151 * None
3152 */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303153static void sxg_link_state(struct adapter_t *adapter,
3154 enum SXG_LINK_STATE LinkState)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003155{
3156 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "LnkINDCT",
3157 adapter, LinkState, adapter->LinkState, adapter->State);
3158
Harvey Harrisone88bd232008-10-17 14:46:10 -07003159 DBG_ERROR("ENTER %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003160
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303161 /*
3162 * Hold the adapter lock during this routine. Maybe move
3163 * the lock to the caller.
3164 */
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303165 /* IMP TODO : Check if we can survive without taking this lock */
3166// spin_lock(&adapter->AdapterLock);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003167 if (LinkState == adapter->LinkState) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04003168 /* Nothing changed.. */
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303169// spin_unlock(&adapter->AdapterLock);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303170 DBG_ERROR("EXIT #0 %s. Link status = %d\n",
3171 __func__, LinkState);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003172 return;
3173 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04003174 /* Save the adapter state */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003175 adapter->LinkState = LinkState;
3176
J.R. Maurob243c4a2008-10-20 19:28:58 -04003177 /* Drop the lock and indicate link state */
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303178// spin_unlock(&adapter->AdapterLock);
Harvey Harrisone88bd232008-10-17 14:46:10 -07003179 DBG_ERROR("EXIT #1 %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003180
3181 sxg_indicate_link_state(adapter, LinkState);
3182}
3183
3184/*
3185 * sxg_write_mdio_reg - Write to a register on the MDIO bus
3186 *
3187 * Arguments -
3188 * adapter - A pointer to our adapter structure
3189 * DevAddr - MDIO device number being addressed
3190 * RegAddr - register address for the specified MDIO device
3191 * Value - value to write to the MDIO register
3192 *
3193 * Return
3194 * status
3195 */
J.R. Mauro73b07062008-10-28 18:42:02 -04003196static int sxg_write_mdio_reg(struct adapter_t *adapter,
J.R. Mauro5c7514e2008-10-05 20:38:52 -04003197 u32 DevAddr, u32 RegAddr, u32 Value)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003198{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303199 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303200 /* Address operation (written to MIIM field reg) */
3201 u32 AddrOp;
3202 /* Write operation (written to MIIM field reg) */
3203 u32 WriteOp;
3204 u32 Cmd;/* Command (written to MIIM command reg) */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003205 u32 ValueRead;
3206 u32 Timeout;
3207
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303208 /* DBG_ERROR("ENTER %s\n", __func__); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003209
3210 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
3211 adapter, 0, 0, 0);
3212
J.R. Maurob243c4a2008-10-20 19:28:58 -04003213 /* Ensure values don't exceed field width */
3214 DevAddr &= 0x001F; /* 5-bit field */
3215 RegAddr &= 0xFFFF; /* 16-bit field */
3216 Value &= 0xFFFF; /* 16-bit field */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003217
J.R. Maurob243c4a2008-10-20 19:28:58 -04003218 /* Set MIIM field register bits for an MIIM address operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003219 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3220 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3221 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3222 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
3223
J.R. Maurob243c4a2008-10-20 19:28:58 -04003224 /* Set MIIM field register bits for an MIIM write operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003225 WriteOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3226 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3227 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3228 (MIIM_OP_WRITE << AXGMAC_AMIIM_FIELD_OP_SHIFT) | Value;
3229
J.R. Maurob243c4a2008-10-20 19:28:58 -04003230 /* Set MIIM command register bits to execute an MIIM command */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003231 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
3232
J.R. Maurob243c4a2008-10-20 19:28:58 -04003233 /* Reset the command register command bit (in case it's not 0) */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003234 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3235
J.R. Maurob243c4a2008-10-20 19:28:58 -04003236 /* MIIM write to set the address of the specified MDIO register */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003237 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
3238
J.R. Maurob243c4a2008-10-20 19:28:58 -04003239 /* Write to MIIM Command Register to execute to address operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003240 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3241
J.R. Maurob243c4a2008-10-20 19:28:58 -04003242 /* Poll AMIIM Indicator register to wait for completion */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003243 Timeout = SXG_LINK_TIMEOUT;
3244 do {
J.R. Maurob243c4a2008-10-20 19:28:58 -04003245 udelay(100); /* Timeout in 100us units */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003246 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3247 if (--Timeout == 0) {
3248 return (STATUS_FAILURE);
3249 }
3250 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3251
J.R. Maurob243c4a2008-10-20 19:28:58 -04003252 /* Reset the command register command bit */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003253 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3254
J.R. Maurob243c4a2008-10-20 19:28:58 -04003255 /* MIIM write to set up an MDIO write operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003256 WRITE_REG(HwRegs->MacAmiimField, WriteOp, TRUE);
3257
J.R. Maurob243c4a2008-10-20 19:28:58 -04003258 /* Write to MIIM Command Register to execute the write operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003259 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3260
J.R. Maurob243c4a2008-10-20 19:28:58 -04003261 /* Poll AMIIM Indicator register to wait for completion */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003262 Timeout = SXG_LINK_TIMEOUT;
3263 do {
J.R. Maurob243c4a2008-10-20 19:28:58 -04003264 udelay(100); /* Timeout in 100us units */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003265 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3266 if (--Timeout == 0) {
3267 return (STATUS_FAILURE);
3268 }
3269 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3270
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303271 /* DBG_ERROR("EXIT %s\n", __func__); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003272
3273 return (STATUS_SUCCESS);
3274}
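
/*
 * Illustration only: sxg_write_mdio_reg() above and sxg_read_mdio_reg()
 * below repeat the same AMIIM busy-poll.  This helper is a sketch of that
 * loop factored out; it uses only the registers, macros and timeout already
 * used by those functions and is not itself part of the driver.
 */
static inline int sxg_sketch_amiim_wait(struct adapter_t *adapter)
{
	struct sxg_hw_regs *HwRegs = adapter->HwRegs;
	u32 ValueRead;
	u32 Timeout = SXG_LINK_TIMEOUT;

	/* Poll the AMIIM Indicator register until the busy bit clears */
	do {
		udelay(100);	/* Timeout in 100us units */
		READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
		if (--Timeout == 0)
			return (STATUS_FAILURE);
	} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);

	return (STATUS_SUCCESS);
}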
3275
3276/*
3277 * sxg_read_mdio_reg - Read a register on the MDIO bus
3278 *
3279 * Arguments -
3280 * adapter - A pointer to our adapter structure
3281 * DevAddr - MDIO device number being addressed
3282 * RegAddr - register address for the specified MDIO device
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303283 * pValue - pointer to where to put data read from the MDIO register
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003284 *
3285 * Return
3286 * status
3287 */
J.R. Mauro73b07062008-10-28 18:42:02 -04003288static int sxg_read_mdio_reg(struct adapter_t *adapter,
J.R. Mauro5c7514e2008-10-05 20:38:52 -04003289 u32 DevAddr, u32 RegAddr, u32 *pValue)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003290{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303291 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303292 u32 AddrOp; /* Address operation (written to MIIM field reg) */
3293 u32 ReadOp; /* Read operation (written to MIIM field reg) */
3294 u32 Cmd; /* Command (written to MIIM command reg) */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003295 u32 ValueRead;
3296 u32 Timeout;
3297
3298 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
3299 adapter, 0, 0, 0);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303300	DBG_ERROR("ENTER %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003301
J.R. Maurob243c4a2008-10-20 19:28:58 -04003302 /* Ensure values don't exceed field width */
3303 DevAddr &= 0x001F; /* 5-bit field */
3304 RegAddr &= 0xFFFF; /* 16-bit field */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003305
J.R. Maurob243c4a2008-10-20 19:28:58 -04003306 /* Set MIIM field register bits for an MIIM address operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003307 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3308 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3309 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3310 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
3311
J.R. Maurob243c4a2008-10-20 19:28:58 -04003312 /* Set MIIM field register bits for an MIIM read operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003313 ReadOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3314 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3315 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3316 (MIIM_OP_READ << AXGMAC_AMIIM_FIELD_OP_SHIFT);
3317
J.R. Maurob243c4a2008-10-20 19:28:58 -04003318 /* Set MIIM command register bits to execute an MIIM command */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003319 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
3320
J.R. Maurob243c4a2008-10-20 19:28:58 -04003321 /* Reset the command register command bit (in case it's not 0) */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003322 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3323
J.R. Maurob243c4a2008-10-20 19:28:58 -04003324 /* MIIM write to set the address of the specified MDIO register */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003325 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
3326
J.R. Maurob243c4a2008-10-20 19:28:58 -04003327 /* Write to MIIM Command Register to execute to address operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003328 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3329
J.R. Maurob243c4a2008-10-20 19:28:58 -04003330 /* Poll AMIIM Indicator register to wait for completion */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003331 Timeout = SXG_LINK_TIMEOUT;
3332 do {
J.R. Maurob243c4a2008-10-20 19:28:58 -04003333 udelay(100); /* Timeout in 100us units */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003334 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3335 if (--Timeout == 0) {
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05303336			DBG_ERROR("EXIT %s with STATUS_FAILURE 1\n", __func__);
3337
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003338 return (STATUS_FAILURE);
3339 }
3340 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3341
J.R. Maurob243c4a2008-10-20 19:28:58 -04003342 /* Reset the command register command bit */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003343 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3344
J.R. Maurob243c4a2008-10-20 19:28:58 -04003345 /* MIIM write to set up an MDIO register read operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003346 WRITE_REG(HwRegs->MacAmiimField, ReadOp, TRUE);
3347
J.R. Maurob243c4a2008-10-20 19:28:58 -04003348 /* Write to MIIM Command Register to execute the read operation */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003349 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3350
J.R. Maurob243c4a2008-10-20 19:28:58 -04003351 /* Poll AMIIM Indicator register to wait for completion */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003352 Timeout = SXG_LINK_TIMEOUT;
3353 do {
J.R. Maurob243c4a2008-10-20 19:28:58 -04003354 udelay(100); /* Timeout in 100us units */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003355 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3356 if (--Timeout == 0) {
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05303357			DBG_ERROR("EXIT %s with STATUS_FAILURE 2\n", __func__);
3358
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003359 return (STATUS_FAILURE);
3360 }
3361 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3362
J.R. Maurob243c4a2008-10-20 19:28:58 -04003363 /* Read the MDIO register data back from the field register */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003364 READ_REG(HwRegs->MacAmiimField, *pValue);
J.R. Maurob243c4a2008-10-20 19:28:58 -04003365 *pValue &= 0xFFFF; /* data is in the lower 16 bits */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003366
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303367	DBG_ERROR("EXIT %s\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003368
3369 return (STATUS_SUCCESS);
3370}
3371
3372/*
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003373 * Functions to obtain the CRC corresponding to the destination mac address.
3374 * This is a standard ethernet CRC in that it is a 32-bit, reflected CRC using
3375 * the polynomial:
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303376 * x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 + x^5
3377 * + x^4 + x^2 + x^1.
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003378 *
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303379 * After the CRC for the 6 bytes is generated (but before the value is
3380 * complemented), we must then transpose the value and return bits 30-23.
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003381 */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303382static u32 sxg_crc_table[256];/* Table of CRC's for all possible byte values */
3383static u32 sxg_crc_init; /* Is table initialized */
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003384
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303385/* Construct the CRC32 table */
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003386static void sxg_mcast_init_crc32(void)
3387{
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303388	u32 c;		/* CRC shift register */
3389 u32 e = 0; /* Poly X-or pattern */
3390 int i; /* counter */
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003391 int k; /* byte being shifted into crc */
3392
3393 static int p[] = { 0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26 };
3394
3395 for (i = 0; i < sizeof(p) / sizeof(int); i++) {
3396 e |= 1L << (31 - p[i]);
3397 }
3398
3399 for (i = 1; i < 256; i++) {
3400 c = i;
3401 for (k = 8; k; k--) {
3402 c = c & 1 ? (c >> 1) ^ e : c >> 1;
3403 }
3404 sxg_crc_table[i] = c;
3405 }
3406}
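
/*
 * Sanity note added for illustration: the polynomial mask e built from p[]
 * above works out to 0xEDB88320, the standard reflected CRC-32 polynomial,
 * so for example sxg_crc_table[1] ends up as 0x77073096, matching any
 * conventional CRC-32 lookup table.
 */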
3407
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003408/*
3409 * Return the MAC hash as described above.
3410 */
3411static unsigned char sxg_mcast_get_mac_hash(char *macaddr)
3412{
3413 u32 crc;
3414 char *p;
3415 int i;
3416 unsigned char machash = 0;
3417
3418 if (!sxg_crc_init) {
3419 sxg_mcast_init_crc32();
3420 sxg_crc_init = 1;
3421 }
3422
3423 crc = 0xFFFFFFFF; /* Preload shift register, per crc-32 spec */
3424 for (i = 0, p = macaddr; i < 6; ++p, ++i) {
3425 crc = (crc >> 8) ^ sxg_crc_table[(crc ^ *p) & 0xFF];
3426 }
3427
3428 /* Return bits 1-8, transposed */
3429 for (i = 1; i < 9; i++) {
3430 machash |= (((crc >> i) & 1) << (8 - i));
3431 }
3432
3433 return (machash);
3434}
3435
J.R. Mauro73b07062008-10-28 18:42:02 -04003436static void sxg_mcast_set_mask(struct adapter_t *adapter)
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003437{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303438 struct sxg_ucode_regs *sxg_regs = adapter->UcodeRegs;
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003439
Mithlesh Thukralb040b072009-01-28 07:08:11 +05303440	DBG_ERROR("%s ENTER (%s) MacFilter[%x] mask[%llx]\n", __func__,
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003441 adapter->netdev->name, (unsigned int)adapter->MacFilter,
3442 adapter->MulticastMask);
3443
3444 if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303445 /*
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303446 * Turn on all multicast addresses. We have to do this for
3447 * promiscuous mode as well as ALLMCAST mode. It saves the
3448		 * Microcode from having to keep state about the MAC configuration
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003449 */
Mithlesh Thukralb040b072009-01-28 07:08:11 +05303450 /* DBG_ERROR("sxg: %s MacFilter = MAC_ALLMCAST | MAC_PROMISC\n \
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303451 * SLUT MODE!!!\n",__func__);
3452 */
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003453 WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
3454 WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303455 /* DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high \
3456 * 0xFFFFFFFF\n",__func__, adapter->netdev->name);
3457 */
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003458
3459 } else {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303460 /*
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303461		 * Commit our multicast mask to the SLIC by writing to the
3462 * multicast address mask registers
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003463 */
3464 DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
3465 __func__, adapter->netdev->name,
3466 ((ulong) (adapter->MulticastMask & 0xFFFFFFFF)),
3467 ((ulong)
3468 ((adapter->MulticastMask >> 32) & 0xFFFFFFFF)));
3469
3470 WRITE_REG(sxg_regs->McastLow,
3471 (u32) (adapter->MulticastMask & 0xFFFFFFFF), FLUSH);
3472 WRITE_REG(sxg_regs->McastHigh,
3473 (u32) ((adapter->
3474 MulticastMask >> 32) & 0xFFFFFFFF), FLUSH);
3475 }
3476}
3477
J.R. Mauro73b07062008-10-28 18:42:02 -04003478static void sxg_mcast_set_bit(struct adapter_t *adapter, char *address)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003479{
3480 unsigned char crcpoly;
3481
3482 /* Get the CRC polynomial for the mac address */
3483 crcpoly = sxg_mcast_get_mac_hash(address);
3484
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303485 /*
3486 * We only have space on the SLIC for 64 entries. Lop
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003487 * off the top two bits. (2^6 = 64)
3488 */
3489 crcpoly &= 0x3F;
3490
3491 /* OR in the new bit into our 64 bit mask. */
3492 adapter->MulticastMask |= (u64) 1 << crcpoly;
3493}
Mithlesh Thukralb040b072009-01-28 07:08:11 +05303494
3495/*
3496 * Function takes MAC addresses from dev_mc_list and generates the Mask
3497 */
3498
3499static void sxg_set_mcast_addr(struct adapter_t *adapter)
3500{
3501 struct dev_mc_list *mclist;
3502 struct net_device *dev = adapter->netdev;
3503 int i;
3504
3505 if (adapter->MacFilter & (MAC_ALLMCAST | MAC_MCAST)) {
3506 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3507 i++, mclist = mclist->next) {
3508 sxg_mcast_set_bit(adapter,mclist->da_addr);
3509 }
3510 }
3511 sxg_mcast_set_mask(adapter);
3512}
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003513
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303514static void sxg_mcast_set_list(struct net_device *dev)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003515{
J.R. Mauro73b07062008-10-28 18:42:02 -04003516 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003517
3518 ASSERT(adapter);
Mithlesh Thukral559990c2009-01-30 20:20:19 +05303519 if (dev->flags & IFF_PROMISC)
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05303520 adapter->MacFilter |= MAC_PROMISC;
Mithlesh Thukralb040b072009-01-28 07:08:11 +05303521 if (dev->flags & IFF_MULTICAST)
3522 adapter->MacFilter |= MAC_MCAST;
Mithlesh Thukral559990c2009-01-30 20:20:19 +05303523 if (dev->flags & IFF_ALLMULTI)
Mithlesh Thukralb040b072009-01-28 07:08:11 +05303524 adapter->MacFilter |= MAC_ALLMCAST;
Mithlesh Thukralb040b072009-01-28 07:08:11 +05303525
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05303526	/* XXX handle other flags as well */
Mithlesh Thukralb040b072009-01-28 07:08:11 +05303527 sxg_set_mcast_addr(adapter);
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05303528}
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003529
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303530void sxg_free_sgl_buffers(struct adapter_t *adapter)
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303531{
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303532 struct list_entry *ple;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303533 struct sxg_scatter_gather *Sgl;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003534
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303535 while(!(IsListEmpty(&adapter->AllSglBuffers))) {
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303536 ple = RemoveHeadList(&adapter->AllSglBuffers);
3537 Sgl = container_of(ple, struct sxg_scatter_gather, AllList);
3538 kfree(Sgl);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303539 adapter->AllSglBufferCount--;
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303540 }
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303541}
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303542
3543void sxg_free_rcvblocks(struct adapter_t *adapter)
3544{
3545 u32 i;
3546 void *temp_RcvBlock;
3547 struct list_entry *ple;
3548 struct sxg_rcv_block_hdr *RcvBlockHdr;
3549 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
3550 ASSERT((adapter->state == SXG_STATE_INITIALIZING) ||
3551 (adapter->state == SXG_STATE_HALTING));
3552 while(!(IsListEmpty(&adapter->AllRcvBlocks))) {
3553
3554 ple = RemoveHeadList(&adapter->AllRcvBlocks);
3555 RcvBlockHdr = container_of(ple, struct sxg_rcv_block_hdr, AllList);
3556
3557 if(RcvBlockHdr->VirtualAddress) {
3558 temp_RcvBlock = RcvBlockHdr->VirtualAddress;
3559
3560 for(i=0; i< SXG_RCV_DESCRIPTORS_PER_BLOCK;
3561 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3562 RcvDataBufferHdr =
3563 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
3564 SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
3565 }
3566 }
3567
3568 pci_free_consistent(adapter->pcidev,
3569 SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
3570 RcvBlockHdr->VirtualAddress,
3571 RcvBlockHdr->PhysicalAddress);
3572 adapter->AllRcvBlockCount--;
3573 }
3574 ASSERT(adapter->AllRcvBlockCount == 0);
3575 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk",
3576 adapter, 0, 0, 0);
3577}

void sxg_free_mcast_addrs(struct adapter_t *adapter)
3579{
3580 struct sxg_multicast_address *address;
3581 while(adapter->MulticastAddrs) {
3582 address = adapter->MulticastAddrs;
3583 adapter->MulticastAddrs = address->Next;
3584 kfree(address);
3585 }
3586
3587 adapter->MulticastMask= 0;
3588}
3589
3590void sxg_unmap_resources(struct adapter_t *adapter)
3591{
3592 if(adapter->HwRegs) {
3593 iounmap((void *)adapter->HwRegs);
3594 }
3595 if(adapter->UcodeRegs) {
3596 iounmap((void *)adapter->UcodeRegs);
3597 }
3598
3599 ASSERT(adapter->AllRcvBlockCount == 0);
3600 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk",
3601 adapter, 0, 0, 0);
3602}
3603
3604
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303605
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003606/*
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303607 * sxg_free_resources - Free everything allocated in SxgAllocateResources
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003608 *
3609 * Arguments -
3610 * adapter - A pointer to our adapter structure
3611 *
3612 * Return
3613 * none
3614 */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303615void sxg_free_resources(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003616{
3617 u32 RssIds, IsrCount;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003618 RssIds = SXG_RSS_CPU_COUNT(adapter);
Mithlesh Thukral1782199f2009-02-06 19:32:28 +05303619 IsrCount = adapter->msi_enabled ? RssIds : 1;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003620
3621 if (adapter->BasicAllocations == FALSE) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303622 /*
3623 * No allocations have been made, including spinlocks,
3624 * or listhead initializations. Return.
3625 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003626 return;
3627 }
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303628
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003629 if (!(IsListEmpty(&adapter->AllRcvBlocks))) {
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303630 sxg_free_rcvblocks(adapter);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003631 }
3632 if (!(IsListEmpty(&adapter->AllSglBuffers))) {
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303633 sxg_free_sgl_buffers(adapter);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003634 }
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303635
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003636 if (adapter->XmtRingZeroIndex) {
3637 pci_free_consistent(adapter->pcidev,
3638 sizeof(u32),
3639 adapter->XmtRingZeroIndex,
3640 adapter->PXmtRingZeroIndex);
3641 }
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303642 if (adapter->Isr) {
3643 pci_free_consistent(adapter->pcidev,
3644 sizeof(u32) * IsrCount,
3645 adapter->Isr, adapter->PIsr);
3646 }
3647
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303648 if (adapter->EventRings) {
3649 pci_free_consistent(adapter->pcidev,
3650 sizeof(struct sxg_event_ring) * RssIds,
3651 adapter->EventRings, adapter->PEventRings);
3652 }
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303653 if (adapter->RcvRings) {
3654 pci_free_consistent(adapter->pcidev,
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303655 sizeof(struct sxg_rcv_ring) * 1,
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303656 adapter->RcvRings,
3657 adapter->PRcvRings);
3658 adapter->RcvRings = NULL;
3659 }
3660
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303661 if(adapter->XmtRings) {
3662 pci_free_consistent(adapter->pcidev,
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303663 sizeof(struct sxg_xmt_ring) * 1,
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303664 adapter->XmtRings,
3665 adapter->PXmtRings);
3666 adapter->XmtRings = NULL;
3667 }
3668
	if (adapter->ucode_stats) {
		/* pci_unmap_single() takes the DMA address, then the size */
		pci_unmap_single(adapter->pcidev,
				 adapter->pucode_stats,
				 sizeof(struct sxg_ucode_stats),
				 PCI_DMA_FROMDEVICE);
		adapter->ucode_stats = NULL;
	}
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303675
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003676
J.R. Maurob243c4a2008-10-20 19:28:58 -04003677 /* Unmap register spaces */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303678 sxg_unmap_resources(adapter);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003679
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303680 sxg_free_mcast_addrs(adapter);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003681
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003682 adapter->BasicAllocations = FALSE;
3683
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003684}
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003685
3686/*
3687 * sxg_allocate_complete -
3688 *
3689 * This routine is called when a memory allocation has completed.
3690 *
3691 * Arguments -
J.R. Mauro73b07062008-10-28 18:42:02 -04003692 * struct adapter_t * - Our adapter structure
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003693 * VirtualAddress - Memory virtual address
3694 * PhysicalAddress - Memory physical address
3695 * Length - Length of memory allocated (or 0)
3696 * Context - The type of buffer allocated
3697 *
3698 * Return
3699 * None.
3700 */
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303701static int sxg_allocate_complete(struct adapter_t *adapter,
J.R. Mauro5c7514e2008-10-05 20:38:52 -04003702 void *VirtualAddress,
3703 dma_addr_t PhysicalAddress,
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303704 u32 Length, enum sxg_buffer_type Context)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003705{
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303706 int status = 0;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003707 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocCmp",
3708 adapter, VirtualAddress, Length, Context);
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303709 ASSERT(atomic_read(&adapter->pending_allocations));
3710 atomic_dec(&adapter->pending_allocations);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003711
3712 switch (Context) {
3713
3714 case SXG_BUFFER_TYPE_RCV:
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303715 status = sxg_allocate_rcvblock_complete(adapter,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003716 VirtualAddress,
3717 PhysicalAddress, Length);
3718 break;
3719 case SXG_BUFFER_TYPE_SGL:
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303720 sxg_allocate_sgl_buffer_complete(adapter, (struct sxg_scatter_gather *)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003721 VirtualAddress,
3722 PhysicalAddress, Length);
3723 break;
3724 }
3725 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocCmp",
3726 adapter, VirtualAddress, Length, Context);
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303727
3728 return status;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003729}
3730
3731/*
3732 * sxg_allocate_buffer_memory - Shared memory allocation routine used for
3733 * synchronous and asynchronous buffer allocations
3734 *
3735 * Arguments -
3736 * adapter - A pointer to our adapter structure
3737 * Size - block size to allocate
3738 * BufferType - Type of buffer to allocate
3739 *
3740 * Return
3741 * int
3742 */
J.R. Mauro73b07062008-10-28 18:42:02 -04003743static int sxg_allocate_buffer_memory(struct adapter_t *adapter,
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303744 u32 Size, enum sxg_buffer_type BufferType)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003745{
3746 int status;
J.R. Mauro5c7514e2008-10-05 20:38:52 -04003747 void *Buffer;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003748 dma_addr_t pBuffer;
3749
3750 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocMem",
3751 adapter, Size, BufferType, 0);
	/*
	 * The original design grabbed the adapter lock and failed the call
	 * in any state other than INITIALIZING or RUNNING, to prevent
	 * allocations in an improper driver state.  The current code only
	 * tracks the allocation in pending_allocations so that pause
	 * processing can wait for outstanding allocations to drain.
	 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003757
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303758 atomic_inc(&adapter->pending_allocations);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003759
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303760 if(BufferType != SXG_BUFFER_TYPE_SGL)
3761 Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer);
3762 else {
3763 Buffer = kzalloc(Size, GFP_ATOMIC);
		pBuffer = 0;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303765 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003766 if (Buffer == NULL) {
		/*
		 * Decrement the pending allocation count.  Pause
		 * processing relies on this count.
		 */
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05303771 atomic_dec(&adapter->pending_allocations);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003772 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1",
3773 adapter, Size, BufferType, 0);
3774 return (STATUS_RESOURCES);
3775 }
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303776 status = sxg_allocate_complete(adapter, Buffer, pBuffer, Size, BufferType);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003777
3778 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocMem",
3779 adapter, Size, BufferType, status);
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303780 return status;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003781}
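
/*
 * Accounting sketch (illustration only, not driver code): every call to
 * sxg_allocate_buffer_memory() bumps adapter->pending_allocations and the
 * matching sxg_allocate_complete() drops it again, so a caller such as
 * sxg_stock_rcv_buffers() can tell whether an allocation is still in
 * flight simply by reading the counter before requesting another block.
 */
#if 0	/* example only, never compiled */
static void example_request_rcv_block(struct adapter_t *adapter)
{
	if (atomic_read(&adapter->pending_allocations) == 0)
		sxg_allocate_buffer_memory(adapter,
				SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
				SXG_BUFFER_TYPE_RCV);
}
#endif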
3782
3783/*
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303784 * sxg_allocate_rcvblock_complete - Complete a receive descriptor
3785 * block allocation
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003786 *
3787 * Arguments -
3788 * adapter - A pointer to our adapter structure
3789 * RcvBlock - receive block virtual address
3790 * PhysicalAddress - Physical address
3791 * Length - Memory length
3792 *
3793 * Return
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003794 */
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303795static int sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
J.R. Mauro5c7514e2008-10-05 20:38:52 -04003796 void *RcvBlock,
3797 dma_addr_t PhysicalAddress,
3798 u32 Length)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003799{
3800 u32 i;
3801 u32 BufferSize = adapter->ReceiveBufferSize;
3802 u64 Paddr;
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303803 void *temp_RcvBlock;
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303804 struct sxg_rcv_block_hdr *RcvBlockHdr;
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303805 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
3806 struct sxg_rcv_descriptor_block *RcvDescriptorBlock;
3807 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003808
3809 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlRcvBlk",
3810 adapter, RcvBlock, Length, 0);
3811 if (RcvBlock == NULL) {
3812 goto fail;
3813 }
3814 memset(RcvBlock, 0, Length);
3815 ASSERT((BufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
3816 (BufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303817 ASSERT(Length == SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE));
	/*
	 * First, initialize the contained pool of receive data buffers.
	 * This requires sk_buff allocations (NBL/NB/MDL in the Windows
	 * driver); if any of them fail, free the block and return without
	 * queueing the shared memory.
	 */
	/* RcvDataBuffer = RcvBlock; */
	temp_RcvBlock = RcvBlock;
3825 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3826 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3827 RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *)
3828 temp_RcvBlock;
3829 /* For FREE macro assertion */
3830 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
3831 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize);
3832 if (RcvDataBufferHdr->SxgDumbRcvPacket == NULL)
3833 goto fail;
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05303834
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303835 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003836
	/*
	 * Place this entire block of memory on the AllRcvBlocks queue so it
	 * can be freed later.
	 */
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303841
3842 RcvBlockHdr = (struct sxg_rcv_block_hdr *) ((unsigned char *)RcvBlock +
3843 SXG_RCV_BLOCK_HDR_OFFSET(SXG_RCV_DATA_HDR_SIZE));
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003844 RcvBlockHdr->VirtualAddress = RcvBlock;
3845 RcvBlockHdr->PhysicalAddress = PhysicalAddress;
3846 spin_lock(&adapter->RcvQLock);
3847 adapter->AllRcvBlockCount++;
3848 InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList);
3849 spin_unlock(&adapter->RcvQLock);
3850
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303851 /* Now free the contained receive data buffers that we
3852 * initialized above */
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303853 temp_RcvBlock = RcvBlock;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003854 for (i = 0, Paddr = PhysicalAddress;
3855 i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303856 i++, Paddr += SXG_RCV_DATA_HDR_SIZE,
3857 temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3858 RcvDataBufferHdr =
3859 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003860 spin_lock(&adapter->RcvQLock);
3861 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
3862 spin_unlock(&adapter->RcvQLock);
3863 }
3864
J.R. Maurob243c4a2008-10-20 19:28:58 -04003865 /* Locate the descriptor block and put it on a separate free queue */
J.R. Mauro5c7514e2008-10-05 20:38:52 -04003866 RcvDescriptorBlock =
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303867 (struct sxg_rcv_descriptor_block *) ((unsigned char *)RcvBlock +
J.R. Mauro5c7514e2008-10-05 20:38:52 -04003868 SXG_RCV_DESCRIPTOR_BLOCK_OFFSET
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303869 (SXG_RCV_DATA_HDR_SIZE));
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003870 RcvDescriptorBlockHdr =
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303871 (struct sxg_rcv_descriptor_block_hdr *) ((unsigned char *)RcvBlock +
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003872 SXG_RCV_DESCRIPTOR_BLOCK_HDR_OFFSET
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303873 (SXG_RCV_DATA_HDR_SIZE));
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003874 RcvDescriptorBlockHdr->VirtualAddress = RcvDescriptorBlock;
3875 RcvDescriptorBlockHdr->PhysicalAddress = Paddr;
3876 spin_lock(&adapter->RcvQLock);
3877 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, RcvDescriptorBlockHdr);
3878 spin_unlock(&adapter->RcvQLock);
3879 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlRBlk",
3880 adapter, RcvBlock, Length, 0);
Mithlesh Thukral0d414722009-01-19 20:29:59 +05303881 return STATUS_SUCCESS;
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303882fail:
J.R. Maurob243c4a2008-10-20 19:28:58 -04003883 /* Free any allocated resources */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003884 if (RcvBlock) {
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303885 temp_RcvBlock = RcvBlock;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003886 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303887 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003888 RcvDataBufferHdr =
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05303889 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003890 SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
3891 }
3892 pci_free_consistent(adapter->pcidev,
3893 Length, RcvBlock, PhysicalAddress);
3894 }
Harvey Harrisone88bd232008-10-17 14:46:10 -07003895 DBG_ERROR("%s: OUT OF RESOURCES\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003896 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "RcvAFail",
3897 adapter, adapter->FreeRcvBufferCount,
3898 adapter->FreeRcvBlockCount, adapter->AllRcvBlockCount);
3899 adapter->Stats.NoMem++;
	/*
	 * Allocation failed.  Any previously allocated blocks stay on
	 * AllRcvBlocks and are freed later by sxg_free_rcvblocks().
	 */
	/* sxg_free_rcvblocks(adapter); */
3902
3903 return STATUS_RESOURCES;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003904}
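
/*
 * Layout sketch of one receive block as assembled above (illustration
 * only; the real offsets come from the SXG_RCV_* macros in the sxg
 * headers and are not reproduced here):
 *
 *   RcvBlock + 0:
 *       SXG_RCV_DESCRIPTORS_PER_BLOCK entries of SXG_RCV_DATA_HDR_SIZE
 *       bytes, each starting with a struct sxg_rcv_data_buffer_hdr.
 *   RcvBlock + SXG_RCV_DESCRIPTOR_BLOCK_OFFSET(SXG_RCV_DATA_HDR_SIZE):
 *       the struct sxg_rcv_descriptor_block handed to the card.
 *   RcvBlock + SXG_RCV_DESCRIPTOR_BLOCK_HDR_OFFSET(SXG_RCV_DATA_HDR_SIZE):
 *       its struct sxg_rcv_descriptor_block_hdr.
 *   RcvBlock + SXG_RCV_BLOCK_HDR_OFFSET(SXG_RCV_DATA_HDR_SIZE):
 *       the struct sxg_rcv_block_hdr linked onto AllRcvBlocks.
 */
#if 0	/* example only: the physical address of the descriptor block,
	 * as produced by the Paddr loop above */
static dma_addr_t example_desc_block_paddr(dma_addr_t PhysicalAddress)
{
	return PhysicalAddress +
		SXG_RCV_DESCRIPTORS_PER_BLOCK * SXG_RCV_DATA_HDR_SIZE;
}
#endif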
3905
3906/*
3907 * sxg_allocate_sgl_buffer_complete - Complete a SGL buffer allocation
3908 *
3909 * Arguments -
3910 * adapter - A pointer to our adapter structure
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303911 * SxgSgl - struct sxg_scatter_gather buffer
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003912 * PhysicalAddress - Physical address
3913 * Length - Memory length
3914 *
3915 * Return
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003916 */
J.R. Mauro73b07062008-10-28 18:42:02 -04003917static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303918 struct sxg_scatter_gather *SxgSgl,
J.R. Mauro5c7514e2008-10-05 20:38:52 -04003919 dma_addr_t PhysicalAddress,
3920 u32 Length)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003921{
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303922 unsigned long sgl_flags;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003923 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlSglCmp",
3924 adapter, SxgSgl, Length, 0);
Mithlesh Thukralc5e5cf52009-02-06 19:31:40 +05303925 spin_lock_irqsave(&adapter->SglQLock, sgl_flags);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003926 adapter->AllSglBufferCount++;
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05303927 /* PhysicalAddress; */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303928 SxgSgl->PhysicalAddress = PhysicalAddress;
3929 /* Initialize backpointer once */
3930 SxgSgl->adapter = adapter;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003931 InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList);
Mithlesh Thukralc5e5cf52009-02-06 19:31:40 +05303932 spin_unlock_irqrestore(&adapter->SglQLock, sgl_flags);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003933 SxgSgl->State = SXG_BUFFER_BUSY;
Mithlesh Thukralc5e5cf52009-02-06 19:31:40 +05303934 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003935 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlSgl",
3936 adapter, SxgSgl, Length, 0);
3937}
3938
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003939
Mithlesh Thukral54aed112009-01-19 20:27:17 +05303940static int sxg_adapter_set_hwaddr(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003941{
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303942 /*
3943 * DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] \
3944 * funct#[%d]\n", __func__, card->config_set,
3945 * adapter->port, adapter->physport, adapter->functionnumber);
3946 *
3947 * sxg_dbg_macaddrs(adapter);
3948 */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303949 /* DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n",
3950 * __FUNCTION__);
3951 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003952
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303953 /* sxg_dbg_macaddrs(adapter); */
3954
	struct net_device *dev = adapter->netdev;

	if (!dev) {
		printk(KERN_ERR "sxg: %s: netdev is NULL\n", __func__);
		return -ENODEV;
	}

	DBG_ERROR("%s ENTER (%s)\n", __func__, dev->name);

	if (netif_running(dev)) {
		return -EBUSY;
	}
3969
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003970 if (!(adapter->currmacaddr[0] ||
3971 adapter->currmacaddr[1] ||
3972 adapter->currmacaddr[2] ||
3973 adapter->currmacaddr[3] ||
3974 adapter->currmacaddr[4] || adapter->currmacaddr[5])) {
3975 memcpy(adapter->currmacaddr, adapter->macaddr, 6);
3976 }
3977 if (adapter->netdev) {
3978 memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05303979 memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003980 }
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05303981 /* DBG_ERROR ("%s EXIT port %d\n", __func__, adapter->port); */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003982 sxg_dbg_macaddrs(adapter);
3983
Mithlesh Thukral54aed112009-01-19 20:27:17 +05303984 return 0;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003985}
3986
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07003987#if XXXTODO
Mithlesh Thukral942798b2009-01-05 21:14:34 +05303988static int sxg_mac_set_address(struct net_device *dev, void *ptr)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003989{
J.R. Mauro73b07062008-10-28 18:42:02 -04003990 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003991 struct sockaddr *addr = ptr;
3992
Harvey Harrisone88bd232008-10-17 14:46:10 -07003993 DBG_ERROR("%s ENTER (%s)\n", __func__, adapter->netdev->name);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07003994
3995 if (netif_running(dev)) {
3996 return -EBUSY;
3997 }
3998 if (!adapter) {
3999 return -EBUSY;
4000 }
4001 DBG_ERROR("sxg: %s (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07004002 __func__, adapter->netdev->name, adapter->currmacaddr[0],
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004003 adapter->currmacaddr[1], adapter->currmacaddr[2],
4004 adapter->currmacaddr[3], adapter->currmacaddr[4],
4005 adapter->currmacaddr[5]);
4006 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4007 memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len);
4008 DBG_ERROR("sxg: %s (%s) new %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
Harvey Harrisone88bd232008-10-17 14:46:10 -07004009 __func__, adapter->netdev->name, adapter->currmacaddr[0],
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004010 adapter->currmacaddr[1], adapter->currmacaddr[2],
4011 adapter->currmacaddr[3], adapter->currmacaddr[4],
4012 adapter->currmacaddr[5]);
4013
4014 sxg_config_set(adapter, TRUE);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004015 return 0;
4016}
Greg Kroah-Hartmanc6c25ed2008-10-21 10:41:45 -07004017#endif
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004018
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004019/*
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05304020 * SXG DRIVER FUNCTIONS (below)
4021 *
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004022 * sxg_initialize_adapter - Initialize adapter
4023 *
4024 * Arguments -
4025 * adapter - A pointer to our adapter structure
4026 *
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05304027 * Return - int
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004028 */
J.R. Mauro73b07062008-10-28 18:42:02 -04004029static int sxg_initialize_adapter(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004030{
4031 u32 RssIds, IsrCount;
4032 u32 i;
4033 int status;
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05304034 int sxg_rcv_ring_size = SXG_RCV_RING_SIZE;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004035
4036 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitAdpt",
4037 adapter, 0, 0, 0);
4038
J.R. Maurob243c4a2008-10-20 19:28:58 -04004039 RssIds = 1; /* XXXTODO SXG_RSS_CPU_COUNT(adapter); */
Mithlesh Thukral1782199f2009-02-06 19:32:28 +05304040 IsrCount = adapter->msi_enabled ? RssIds : 1;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004041
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05304042 /*
4043 * Sanity check SXG_UCODE_REGS structure definition to
4044 * make sure the length is correct
4045 */
Mithlesh Thukral942798b2009-01-05 21:14:34 +05304046 ASSERT(sizeof(struct sxg_ucode_regs) == SXG_REGISTER_SIZE_PER_CPU);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004047
J.R. Maurob243c4a2008-10-20 19:28:58 -04004048 /* Disable interrupts */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004049 SXG_DISABLE_ALL_INTERRUPTS(adapter);
4050
J.R. Maurob243c4a2008-10-20 19:28:58 -04004051 /* Set MTU */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004052 ASSERT((adapter->FrameSize == ETHERMAXFRAME) ||
4053 (adapter->FrameSize == JUMBOMAXFRAME));
4054 WRITE_REG(adapter->UcodeRegs[0].LinkMtu, adapter->FrameSize, TRUE);
4055
J.R. Maurob243c4a2008-10-20 19:28:58 -04004056 /* Set event ring base address and size */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004057 WRITE_REG64(adapter,
4058 adapter->UcodeRegs[0].EventBase, adapter->PEventRings, 0);
4059 WRITE_REG(adapter->UcodeRegs[0].EventSize, EVENT_RING_SIZE, TRUE);
4060
J.R. Maurob243c4a2008-10-20 19:28:58 -04004061 /* Per-ISR initialization */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004062 for (i = 0; i < IsrCount; i++) {
4063 u64 Addr;
J.R. Maurob243c4a2008-10-20 19:28:58 -04004064 /* Set interrupt status pointer */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004065 Addr = adapter->PIsr + (i * sizeof(u32));
4066 WRITE_REG64(adapter, adapter->UcodeRegs[i].Isp, Addr, i);
4067 }
4068
J.R. Maurob243c4a2008-10-20 19:28:58 -04004069 /* XMT ring zero index */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004070 WRITE_REG64(adapter,
4071 adapter->UcodeRegs[0].SPSendIndex,
4072 adapter->PXmtRingZeroIndex, 0);
4073
J.R. Maurob243c4a2008-10-20 19:28:58 -04004074 /* Per-RSS initialization */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004075 for (i = 0; i < RssIds; i++) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04004076 /* Release all event ring entries to the Microcode */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004077 WRITE_REG(adapter->UcodeRegs[i].EventRelease, EVENT_RING_SIZE,
4078 TRUE);
4079 }
4080
J.R. Maurob243c4a2008-10-20 19:28:58 -04004081 /* Transmit ring base and size */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004082 WRITE_REG64(adapter,
4083 adapter->UcodeRegs[0].XmtBase, adapter->PXmtRings, 0);
4084 WRITE_REG(adapter->UcodeRegs[0].XmtSize, SXG_XMT_RING_SIZE, TRUE);
4085
J.R. Maurob243c4a2008-10-20 19:28:58 -04004086 /* Receive ring base and size */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004087 WRITE_REG64(adapter,
4088 adapter->UcodeRegs[0].RcvBase, adapter->PRcvRings, 0);
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05304089 if (adapter->JumboEnabled == TRUE)
4090 sxg_rcv_ring_size = SXG_JUMBO_RCV_RING_SIZE;
4091 WRITE_REG(adapter->UcodeRegs[0].RcvSize, sxg_rcv_ring_size, TRUE);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004092
J.R. Maurob243c4a2008-10-20 19:28:58 -04004093 /* Populate the card with receive buffers */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004094 sxg_stock_rcv_buffers(adapter);
4095
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05304096 /*
4097 * Initialize checksum offload capabilities. At the moment we always
4098 * enable IP and TCP receive checksums on the card. Depending on the
4099 * checksum configuration specified by the user, we can choose to
4100 * report or ignore the checksum information provided by the card.
4101 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004102 WRITE_REG(adapter->UcodeRegs[0].ReceiveChecksum,
4103 SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED, TRUE);
4104
	adapter->flags |= (SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED);
4106
J.R. Maurob243c4a2008-10-20 19:28:58 -04004107 /* Initialize the MAC, XAUI */
Harvey Harrisone88bd232008-10-17 14:46:10 -07004108 DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __func__);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004109 status = sxg_initialize_link(adapter);
Harvey Harrisone88bd232008-10-17 14:46:10 -07004110 DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __func__,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004111 status);
4112 if (status != STATUS_SUCCESS) {
4113 return (status);
4114 }
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05304115 /*
4116 * Initialize Dead to FALSE.
4117 * SlicCheckForHang or SlicDumpThread will take it from here.
4118 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004119 adapter->Dead = FALSE;
4120 adapter->PingOutstanding = FALSE;
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05304121 adapter->State = SXG_STATE_RUNNING;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004122
4123 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInit",
4124 adapter, 0, 0, 0);
4125 return (STATUS_SUCCESS);
4126}
4127
4128/*
4129 * sxg_fill_descriptor_block - Populate a descriptor block and give it to
4130 * the card. The caller should hold the RcvQLock
4131 *
4132 * Arguments -
4133 * adapter - A pointer to our adapter structure
4134 * RcvDescriptorBlockHdr - Descriptor block to fill
4135 *
4136 * Return
4137 * status
4138 */
J.R. Mauro73b07062008-10-28 18:42:02 -04004139static int sxg_fill_descriptor_block(struct adapter_t *adapter,
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05304140 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004141{
4142 u32 i;
Mithlesh Thukral942798b2009-01-05 21:14:34 +05304143 struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo;
4144 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
4145 struct sxg_rcv_descriptor_block *RcvDescriptorBlock;
4146 struct sxg_cmd *RingDescriptorCmd;
4147 struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0];
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004148
4149 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "FilBlk",
4150 adapter, adapter->RcvBuffersOnCard,
4151 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
4152
4153 ASSERT(RcvDescriptorBlockHdr);
4154
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05304155 /*
4156 * If we don't have the resources to fill the descriptor block,
4157 * return failure
4158 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004159 if ((adapter->FreeRcvBufferCount < SXG_RCV_DESCRIPTORS_PER_BLOCK) ||
4160 SXG_RING_FULL(RcvRingInfo)) {
4161 adapter->Stats.NoMem++;
4162 return (STATUS_FAILURE);
4163 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04004164 /* Get a ring descriptor command */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004165 SXG_GET_CMD(RingZero,
4166 RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr);
4167 ASSERT(RingDescriptorCmd);
4168 RcvDescriptorBlockHdr->State = SXG_BUFFER_ONCARD;
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05304169 RcvDescriptorBlock = (struct sxg_rcv_descriptor_block *)
4170 RcvDescriptorBlockHdr->VirtualAddress;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004171
J.R. Maurob243c4a2008-10-20 19:28:58 -04004172 /* Fill in the descriptor block */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004173 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) {
4174 SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
4175 ASSERT(RcvDataBufferHdr);
		/* ASSERT(RcvDataBufferHdr->SxgDumbRcvPacket); */
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05304177 if (!RcvDataBufferHdr->SxgDumbRcvPacket) {
4178 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr,
4179 adapter->ReceiveBufferSize);
4180 if(RcvDataBufferHdr->skb)
4181 RcvDataBufferHdr->SxgDumbRcvPacket =
4182 RcvDataBufferHdr->skb;
4183 else
4184 goto no_memory;
4185 }
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004186 SXG_REINIATIALIZE_PACKET(RcvDataBufferHdr->SxgDumbRcvPacket);
4187 RcvDataBufferHdr->State = SXG_BUFFER_ONCARD;
J.R. Mauro5c7514e2008-10-05 20:38:52 -04004188 RcvDescriptorBlock->Descriptors[i].VirtualAddress =
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05304189 (void *)RcvDataBufferHdr;
Mithlesh Thukral1323e5f2009-01-05 21:13:23 +05304190
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004191 RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
4192 RcvDataBufferHdr->PhysicalAddress;
4193 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04004194 /* Add the descriptor block to receive descriptor ring 0 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004195 RingDescriptorCmd->Sgl = RcvDescriptorBlockHdr->PhysicalAddress;
4196
	/*
	 * RcvBuffersOnCard is not protected via the receive lock (see
	 * sxg_process_event_queue).  We don't want to grab a lock every
	 * time a buffer is returned to us, so we use atomic interlocked
	 * functions instead.
	 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004203 adapter->RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK;
4204
4205 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DscBlk",
4206 RcvDescriptorBlockHdr,
4207 RingDescriptorCmd, RcvRingInfo->Head, RcvRingInfo->Tail);
4208
4209 WRITE_REG(adapter->UcodeRegs[0].RcvCmd, 1, true);
4210 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlk",
4211 adapter, adapter->RcvBuffersOnCard,
4212 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
4213 return (STATUS_SUCCESS);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05304214no_memory:
4215 return (-ENOMEM);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004216}
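
/*
 * Calling-convention sketch (illustration only): sxg_fill_descriptor_block()
 * expects the caller to hold RcvQLock, which is exactly how
 * sxg_stock_rcv_buffers() and sxg_complete_descriptor_blocks() below use it.
 */
#if 0	/* example only, never compiled */
static void example_restock_one_block(struct adapter_t *adapter,
		struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr)
{
	spin_lock(&adapter->RcvQLock);
	if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
	    STATUS_FAILURE)
		SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, RcvDescriptorBlockHdr);
	spin_unlock(&adapter->RcvQLock);
}
#endif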
4217
4218/*
4219 * sxg_stock_rcv_buffers - Stock the card with receive buffers
4220 *
4221 * Arguments -
4222 * adapter - A pointer to our adapter structure
4223 *
4224 * Return
4225 * None
4226 */
J.R. Mauro73b07062008-10-28 18:42:02 -04004227static void sxg_stock_rcv_buffers(struct adapter_t *adapter)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004228{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05304229 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05304230 int sxg_rcv_data_buffers = SXG_RCV_DATA_BUFFERS;
4231 int sxg_min_rcv_data_buffers = SXG_MIN_RCV_DATA_BUFFERS;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004232
4233 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf",
4234 adapter, adapter->RcvBuffersOnCard,
4235 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
	/*
	 * First, see if we've got less than our minimum threshold of
	 * receive buffers, there isn't an allocation in progress, and
	 * we haven't exceeded our maximum.  If so, get another block of
	 * buffers.  None of this needs to be SMP safe; these are round
	 * numbers.
	 */
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05304242 if (adapter->JumboEnabled == TRUE)
4243 sxg_min_rcv_data_buffers = SXG_MIN_JUMBO_RCV_DATA_BUFFERS;
4244 if ((adapter->FreeRcvBufferCount < sxg_min_rcv_data_buffers) &&
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004245 (adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) &&
Mithlesh Thukral6a2946b2009-01-19 20:24:30 +05304246 (atomic_read(&adapter->pending_allocations) == 0)) {
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004247 sxg_allocate_buffer_memory(adapter,
Mithlesh Thukrald0128aa2009-01-05 21:18:04 +05304248 SXG_RCV_BLOCK_SIZE
4249 (SXG_RCV_DATA_HDR_SIZE),
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004250 SXG_BUFFER_TYPE_RCV);
4251 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04004252 /* Now grab the RcvQLock lock and proceed */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004253 spin_lock(&adapter->RcvQLock);
Mithlesh Thukral7c66b142009-02-06 19:30:40 +05304254 if (adapter->JumboEnabled)
4255 sxg_rcv_data_buffers = SXG_JUMBO_RCV_DATA_BUFFERS;
4256 while (adapter->RcvBuffersOnCard < sxg_rcv_data_buffers) {
Mithlesh Thukral942798b2009-01-05 21:14:34 +05304257 struct list_entry *_ple;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004258
J.R. Maurob243c4a2008-10-20 19:28:58 -04004259 /* Get a descriptor block */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004260 RcvDescriptorBlockHdr = NULL;
4261 if (adapter->FreeRcvBlockCount) {
4262 _ple = RemoveHeadList(&adapter->FreeRcvBlocks);
J.R. Mauro5c7514e2008-10-05 20:38:52 -04004263 RcvDescriptorBlockHdr =
Mithlesh Thukral942798b2009-01-05 21:14:34 +05304264 container_of(_ple, struct sxg_rcv_descriptor_block_hdr,
J.R. Mauro5c7514e2008-10-05 20:38:52 -04004265 FreeList);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004266 adapter->FreeRcvBlockCount--;
4267 RcvDescriptorBlockHdr->State = SXG_BUFFER_BUSY;
4268 }
4269
4270 if (RcvDescriptorBlockHdr == NULL) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04004271 /* Bail out.. */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004272 adapter->Stats.NoMem++;
4273 break;
4274 }
J.R. Maurob243c4a2008-10-20 19:28:58 -04004275 /* Fill in the descriptor block and give it to the card */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004276 if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
4277 STATUS_FAILURE) {
J.R. Maurob243c4a2008-10-20 19:28:58 -04004278 /* Free the descriptor block */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004279 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
4280 RcvDescriptorBlockHdr);
4281 break;
4282 }
4283 }
4284 spin_unlock(&adapter->RcvQLock);
4285 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlks",
4286 adapter, adapter->RcvBuffersOnCard,
4287 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
4288}
4289
4290/*
4291 * sxg_complete_descriptor_blocks - Return descriptor blocks that have been
4292 * completed by the microcode
4293 *
4294 * Arguments -
4295 * adapter - A pointer to our adapter structure
4296 * Index - Where the microcode is up to
4297 *
4298 * Return
4299 * None
4300 */
J.R. Mauro73b07062008-10-28 18:42:02 -04004301static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
J.R. Mauro5c7514e2008-10-05 20:38:52 -04004302 unsigned char Index)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004303{
Mithlesh Thukral942798b2009-01-05 21:14:34 +05304304 struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0];
4305 struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo;
4306 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
4307 struct sxg_cmd *RingDescriptorCmd;
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004308
4309 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlks",
4310 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
4311
J.R. Maurob243c4a2008-10-20 19:28:58 -04004312 /* Now grab the RcvQLock lock and proceed */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004313 spin_lock(&adapter->RcvQLock);
4314 ASSERT(Index != RcvRingInfo->Tail);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05304315 while (sxg_ring_get_forward_diff(RcvRingInfo, Index,
4316 RcvRingInfo->Tail) > 3) {
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05304317 /*
4318 * Locate the current Cmd (ring descriptor entry), and
4319 * associated receive descriptor block, and advance
4320 * the tail
4321 */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004322 SXG_RETURN_CMD(RingZero,
4323 RcvRingInfo,
4324 RingDescriptorCmd, RcvDescriptorBlockHdr);
4325 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlk",
4326 RcvRingInfo->Head, RcvRingInfo->Tail,
4327 RingDescriptorCmd, RcvDescriptorBlockHdr);
4328
J.R. Maurob243c4a2008-10-20 19:28:58 -04004329 /* Clear the SGL field */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004330 RingDescriptorCmd->Sgl = 0;
Mithlesh Thukralddd6f0a2009-01-05 21:15:29 +05304331 /*
4332 * Attempt to refill it and hand it right back to the
4333 * card. If we fail to refill it, free the descriptor block
4334 * header. The card will be restocked later via the
4335 * RcvBuffersOnCard test
4336 */
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05304337 if (sxg_fill_descriptor_block(adapter,
4338 RcvDescriptorBlockHdr) == STATUS_FAILURE)
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004339 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
4340 RcvDescriptorBlockHdr);
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004341 }
4342 spin_unlock(&adapter->RcvQLock);
4343 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XCRBlks",
4344 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
4345}
4346
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05304347/*
4348 * Read the statistics which the card has been maintaining.
4349 */
4350void sxg_collect_statistics(struct adapter_t *adapter)
4351{
	if (adapter->ucode_stats) {
		WRITE_REG64(adapter, adapter->UcodeRegs[0].GetUcodeStats,
			    adapter->pucode_stats, 0);
		adapter->stats.rx_fifo_errors = adapter->ucode_stats->ERDrops;
		adapter->stats.rx_over_errors = adapter->ucode_stats->NBDrops;
		adapter->stats.tx_fifo_errors = adapter->ucode_stats->XDrops;
	}
}
4359
4360static struct net_device_stats *sxg_get_stats(struct net_device * dev)
4361{
4362 struct adapter_t *adapter = netdev_priv(dev);
4363
4364 sxg_collect_statistics(adapter);
4365 return (&adapter->stats);
Mithlesh Thukrald9d578b2009-01-19 20:23:22 +05304366}
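
/*
 * Hook-up sketch (illustration only, and an assumption: the actual wiring
 * happens in sxg_entry_probe(), which is not part of this excerpt).  In
 * kernels of this vintage a driver exposes sxg_get_stats() either through
 * the older dev->get_stats pointer or, once converted to struct
 * net_device_ops, through the .ndo_get_stats member; which one applies
 * depends on the exact kernel version being built against.
 */
#if 0	/* example only, never compiled */
static void example_register_get_stats(struct net_device *dev)
{
	/* pre-net_device_ops style: */
	dev->get_stats = sxg_get_stats;
	/* or, in a struct net_device_ops initializer:
	 *	.ndo_get_stats = sxg_get_stats,
	 */
}
#endif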
4367
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004368static struct pci_driver sxg_driver = {
Mithlesh Thukral371d7a92009-01-19 20:22:34 +05304369 .name = sxg_driver_name,
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004370 .id_table = sxg_pci_tbl,
4371 .probe = sxg_entry_probe,
4372 .remove = sxg_entry_remove,
4373#if SXG_POWER_MANAGEMENT_ENABLED
4374 .suspend = sxgpm_suspend,
4375 .resume = sxgpm_resume,
4376#endif
Mithlesh Thukralcb636fe2009-01-05 21:16:56 +05304377 /* .shutdown = slic_shutdown, MOOK_INVESTIGATE */
Greg Kroah-Hartman5db6b772008-08-21 14:04:55 -07004378};
4379
4380static int __init sxg_module_init(void)
4381{
4382 sxg_init_driver();
4383
4384 if (debug >= 0)
4385 sxg_debug = debug;
4386
4387 return pci_register_driver(&sxg_driver);
4388}
4389
4390static void __exit sxg_module_cleanup(void)
4391{
4392 pci_unregister_driver(&sxg_driver);
4393}
4394
4395module_init(sxg_module_init);
4396module_exit(sxg_module_cleanup);