Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
2 * Copyright (C) 2005 - 2009 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17
18#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000019#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070020#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070021
22MODULE_VERSION(DRV_VER);
23MODULE_DEVICE_TABLE(pci, be_dev_ids);
24MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
25MODULE_AUTHOR("ServerEngines Corporation");
26MODULE_LICENSE("GPL");
27
28static unsigned int rx_frag_size = 2048;
29module_param(rx_frag_size, uint, S_IRUGO);
30MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
31
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070033 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070034 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070035 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
36 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070037 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
Sathya Perla6b7c5b92009-03-11 23:32:03 -070038 { 0 }
39};
40MODULE_DEVICE_TABLE(pci, be_dev_ids);
41
42static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
43{
44 struct be_dma_mem *mem = &q->dma_mem;
45 if (mem->va)
46 pci_free_consistent(adapter->pdev, mem->size,
47 mem->va, mem->dma);
48}
49
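/* Allocate and zero a DMA-coherent ring of 'len' entries of 'entry_size'
 * bytes each; returns 0 on success or -1 if the allocation fails.
 */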
50static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
51 u16 len, u16 entry_size)
52{
53 struct be_dma_mem *mem = &q->dma_mem;
54
55 memset(q, 0, sizeof(*q));
56 q->len = len;
57 q->entry_size = entry_size;
58 mem->size = len * entry_size;
59 mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
60 if (!mem->va)
61 return -1;
62 memset(mem->va, 0, mem->size);
63 return 0;
64}
65
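/* Enable or disable host interrupt delivery by toggling the HOSTINTR bit in
 * the membar control register; the register is written back only when the
 * requested state differs from the current one.
 */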
Sathya Perla8788fdc2009-07-27 22:52:03 +000066static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -070067{
Sathya Perla8788fdc2009-07-27 22:52:03 +000068 u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
Sathya Perla6b7c5b92009-03-11 23:32:03 -070069 u32 reg = ioread32(addr);
70 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +000071
72 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -070073 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +000074 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -070075 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +000076 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -070077 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +000078
Sathya Perla6b7c5b92009-03-11 23:32:03 -070079 iowrite32(reg, addr);
80}
81
Sathya Perla8788fdc2009-07-27 22:52:03 +000082static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -070083{
84 u32 val = 0;
85 val |= qid & DB_RQ_RING_ID_MASK;
86 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +000087 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070088}
89
Sathya Perla8788fdc2009-07-27 22:52:03 +000090static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -070091{
92 u32 val = 0;
93 val |= qid & DB_TXULP_RING_ID_MASK;
94 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +000095 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070096}
97
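/* Ring the event-queue doorbell: acknowledge 'num_popped' consumed event
 * entries and, if requested, re-arm the EQ and/or clear the interrupt.
 */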
Sathya Perla8788fdc2009-07-27 22:52:03 +000098static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -070099 bool arm, bool clear_int, u16 num_popped)
100{
101 u32 val = 0;
102 val |= qid & DB_EQ_RING_ID_MASK;
103 if (arm)
104 val |= 1 << DB_EQ_REARM_SHIFT;
105 if (clear_int)
106 val |= 1 << DB_EQ_CLR_SHIFT;
107 val |= 1 << DB_EQ_EVNT_SHIFT;
108 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000109 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700110}
111
Sathya Perla8788fdc2009-07-27 22:52:03 +0000112void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700113{
114 u32 val = 0;
115 val |= qid & DB_CQ_RING_ID_MASK;
116 if (arm)
117 val |= 1 << DB_CQ_REARM_SHIFT;
118 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000119 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700120}
121
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700122static int be_mac_addr_set(struct net_device *netdev, void *p)
123{
124 struct be_adapter *adapter = netdev_priv(netdev);
125 struct sockaddr *addr = p;
126 int status = 0;
127
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000128 if (!is_valid_ether_addr(addr->sa_data))
129 return -EADDRNOTAVAIL;
130
Sathya Perlaa65027e2009-08-17 00:58:04 +0000131 status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
132 if (status)
133 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700134
Sathya Perlaa65027e2009-08-17 00:58:04 +0000135 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
136 adapter->if_handle, &adapter->pmac_id);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137 if (!status)
138 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
139
140 return status;
141}
142
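/* Fold the firmware-maintained port and ERX statistics (fetched into the
 * stats command buffer) into the netdev statistics structure.
 */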
Sathya Perlab31c50a2009-09-17 10:30:13 -0700143void netdev_stats_update(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700144{
145 struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
146 struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
147 struct be_port_rxf_stats *port_stats =
148 &rxf_stats->port[adapter->port_num];
Ajit Khaparde78122a52009-10-07 03:11:20 -0700149 struct net_device_stats *dev_stats = &adapter->netdev->stats;
Sathya Perla68110862009-06-10 02:21:16 +0000150 struct be_erx_stats *erx_stats = &hw_stats->erx;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700151
152 dev_stats->rx_packets = port_stats->rx_total_frames;
153 dev_stats->tx_packets = port_stats->tx_unicastframes +
154 port_stats->tx_multicastframes + port_stats->tx_broadcastframes;
155 dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 |
156 (u64) port_stats->rx_bytes_lsd;
157 dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
158 (u64) port_stats->tx_bytes_lsd;
159
160 /* bad pkts received */
161 dev_stats->rx_errors = port_stats->rx_crc_errors +
162 port_stats->rx_alignment_symbol_errors +
163 port_stats->rx_in_range_errors +
Sathya Perla68110862009-06-10 02:21:16 +0000164 port_stats->rx_out_range_errors +
165 port_stats->rx_frame_too_long +
166 port_stats->rx_dropped_too_small +
167 port_stats->rx_dropped_too_short +
168 port_stats->rx_dropped_header_too_small +
169 port_stats->rx_dropped_tcp_length +
170 port_stats->rx_dropped_runt +
171 port_stats->rx_tcp_checksum_errs +
172 port_stats->rx_ip_checksum_errs +
173 port_stats->rx_udp_checksum_errs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700174
Sathya Perla68110862009-06-10 02:21:16 +0000175 /* no space in linux buffers: best possible approximation */
Sathya Perla01ed30d2009-11-22 22:01:31 +0000176 dev_stats->rx_dropped =
177 erx_stats->rx_drops_no_fragments[adapter->rx_obj.q.id];
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700178
179 /* detailed rx errors */
180 dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
Sathya Perla68110862009-06-10 02:21:16 +0000181 port_stats->rx_out_range_errors +
182 port_stats->rx_frame_too_long;
183
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700184 /* receive ring buffer overflow */
185 dev_stats->rx_over_errors = 0;
Sathya Perla68110862009-06-10 02:21:16 +0000186
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700187 dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
188
189 /* frame alignment errors */
190 dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000191
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700192 /* receiver fifo overrun */
 193 /* drops_no_pbuf is not per i/f, it's per BE card */
194 dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
195 port_stats->rx_input_fifo_overflow +
196 rxf_stats->rx_drops_no_pbuf;
 197 /* receiver missed packets */
198 dev_stats->rx_missed_errors = 0;
Sathya Perla68110862009-06-10 02:21:16 +0000199
200 /* packet transmit problems */
201 dev_stats->tx_errors = 0;
202
203 /* no space available in linux */
204 dev_stats->tx_dropped = 0;
205
Ajit Khapardec5b9b922009-10-05 02:21:51 +0000206 dev_stats->multicast = port_stats->rx_multicast_frames;
Sathya Perla68110862009-06-10 02:21:16 +0000207 dev_stats->collisions = 0;
208
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209 /* detailed tx_errors */
210 dev_stats->tx_aborted_errors = 0;
211 dev_stats->tx_carrier_errors = 0;
212 dev_stats->tx_fifo_errors = 0;
213 dev_stats->tx_heartbeat_errors = 0;
214 dev_stats->tx_window_errors = 0;
215}
216
Sathya Perla8788fdc2009-07-27 22:52:03 +0000217void be_link_status_update(struct be_adapter *adapter, bool link_up)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700219 struct net_device *netdev = adapter->netdev;
220
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700221 /* If link came up or went down */
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000222 if (adapter->link_up != link_up) {
Ajit Khaparde0dffc832009-11-29 17:57:46 +0000223 adapter->link_speed = -1;
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000224 if (link_up) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700225 netif_start_queue(netdev);
226 netif_carrier_on(netdev);
227 printk(KERN_INFO "%s: Link up\n", netdev->name);
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000228 } else {
229 netif_stop_queue(netdev);
230 netif_carrier_off(netdev);
231 printk(KERN_INFO "%s: Link down\n", netdev->name);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232 }
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000233 adapter->link_up = link_up;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700234 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700235}
236
 237/* Update the EQ delay in BE based on the RX frags consumed / sec */
238static void be_rx_eqd_update(struct be_adapter *adapter)
239{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700240 struct be_eq_obj *rx_eq = &adapter->rx_eq;
241 struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
Sathya Perla4097f662009-03-24 16:40:13 -0700242 ulong now = jiffies;
243 u32 eqd;
244
245 if (!rx_eq->enable_aic)
246 return;
247
248 /* Wrapped around */
249 if (time_before(now, stats->rx_fps_jiffies)) {
250 stats->rx_fps_jiffies = now;
251 return;
252 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700253
254 /* Update once a second */
Sathya Perla4097f662009-03-24 16:40:13 -0700255 if ((now - stats->rx_fps_jiffies) < HZ)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700256 return;
257
258 stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
Sathya Perla4097f662009-03-24 16:40:13 -0700259 ((now - stats->rx_fps_jiffies) / HZ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700260
Sathya Perla4097f662009-03-24 16:40:13 -0700261 stats->rx_fps_jiffies = now;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700262 stats->be_prev_rx_frags = stats->be_rx_frags;
263 eqd = stats->be_rx_fps / 110000;
264 eqd = eqd << 3;
265 if (eqd > rx_eq->max_eqd)
266 eqd = rx_eq->max_eqd;
267 if (eqd < rx_eq->min_eqd)
268 eqd = rx_eq->min_eqd;
269 if (eqd < 10)
270 eqd = 0;
271 if (eqd != rx_eq->cur_eqd)
Sathya Perla8788fdc2009-07-27 22:52:03 +0000272 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700273
274 rx_eq->cur_eqd = eqd;
275}
276
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700277static struct net_device_stats *be_get_stats(struct net_device *dev)
278{
Ajit Khaparde78122a52009-10-07 03:11:20 -0700279 return &dev->stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700280}
281
Stephen Hemminger65f71b82009-03-27 00:25:24 -0700282static u32 be_calc_rate(u64 bytes, unsigned long ticks)
283{
284 u64 rate = bytes;
285
286 do_div(rate, ticks / HZ);
287 rate <<= 3; /* bytes/sec -> bits/sec */
 288 do_div(rate, 1000000ul); /* Mbits/sec */
289
290 return rate;
291}
292
Sathya Perla4097f662009-03-24 16:40:13 -0700293static void be_tx_rate_update(struct be_adapter *adapter)
294{
295 struct be_drvr_stats *stats = drvr_stats(adapter);
296 ulong now = jiffies;
297
298 /* Wrapped around? */
299 if (time_before(now, stats->be_tx_jiffies)) {
300 stats->be_tx_jiffies = now;
301 return;
302 }
303
304 /* Update tx rate once in two seconds */
305 if ((now - stats->be_tx_jiffies) > 2 * HZ) {
Stephen Hemminger65f71b82009-03-27 00:25:24 -0700306 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
307 - stats->be_tx_bytes_prev,
308 now - stats->be_tx_jiffies);
Sathya Perla4097f662009-03-24 16:40:13 -0700309 stats->be_tx_jiffies = now;
310 stats->be_tx_bytes_prev = stats->be_tx_bytes;
311 }
312}
313
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700314static void be_tx_stats_update(struct be_adapter *adapter,
315 u32 wrb_cnt, u32 copied, bool stopped)
316{
Sathya Perla4097f662009-03-24 16:40:13 -0700317 struct be_drvr_stats *stats = drvr_stats(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700318 stats->be_tx_reqs++;
319 stats->be_tx_wrbs += wrb_cnt;
320 stats->be_tx_bytes += copied;
321 if (stopped)
322 stats->be_tx_stops++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700323}
324
325/* Determine number of WRB entries needed to xmit data in an skb */
326static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
327{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700328 int cnt = (skb->len > skb->data_len);
329
330 cnt += skb_shinfo(skb)->nr_frags;
331
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700332 /* to account for hdr wrb */
333 cnt++;
334 if (cnt & 1) {
335 /* add a dummy to make it an even num */
336 cnt++;
337 *dummy = true;
338 } else
339 *dummy = false;
340 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
341 return cnt;
342}
343
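/* Fill one work-request descriptor with the fragment's bus address and length. */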
344static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
345{
346 wrb->frag_pa_hi = upper_32_bits(addr);
347 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
348 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
349}
350
351static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
352 bool vlan, u32 wrb_cnt, u32 len)
353{
354 memset(hdr, 0, sizeof(*hdr));
355
356 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
357
358 if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
359 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
360 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
361 hdr, skb_shinfo(skb)->gso_size);
362 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
363 if (is_tcp_pkt(skb))
364 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
365 else if (is_udp_pkt(skb))
366 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
367 }
368
369 if (vlan && vlan_tx_tag_present(skb)) {
370 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
371 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
372 hdr, vlan_tx_tag_get(skb));
373 }
374
375 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
376 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
377 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
378 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
379}
380
381
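/* DMA-map the skb and build its WRB chain on the TX ring: a header WRB, one
 * WRB per linear/paged fragment, and an optional dummy WRB to keep the count
 * even. Returns the number of payload bytes queued, or 0 if the DMA mapping
 * fails.
 */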
382static int make_tx_wrbs(struct be_adapter *adapter,
383 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
384{
385 u64 busaddr;
386 u32 i, copied = 0;
387 struct pci_dev *pdev = adapter->pdev;
388 struct sk_buff *first_skb = skb;
389 struct be_queue_info *txq = &adapter->tx_obj.q;
390 struct be_eth_wrb *wrb;
391 struct be_eth_hdr_wrb *hdr;
392
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700393 hdr = queue_head_node(txq);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000394 atomic_add(wrb_cnt, &txq->used);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700395 queue_head_inc(txq);
396
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000397 if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
398 dev_err(&pdev->dev, "TX DMA mapping failed\n");
399 return 0;
400 }
401
David S. Millerebc8d2a2009-06-09 01:01:31 -0700402 if (skb->len > skb->data_len) {
403 int len = skb->len - skb->data_len;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700404 wrb = queue_head_node(txq);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000405 busaddr = skb_shinfo(skb)->dma_head;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700406 wrb_fill(wrb, busaddr, len);
407 be_dws_cpu_to_le(wrb, sizeof(*wrb));
408 queue_head_inc(txq);
409 copied += len;
410 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700411
David S. Millerebc8d2a2009-06-09 01:01:31 -0700412 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
413 struct skb_frag_struct *frag =
414 &skb_shinfo(skb)->frags[i];
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000415
416 busaddr = skb_shinfo(skb)->dma_maps[i];
David S. Millerebc8d2a2009-06-09 01:01:31 -0700417 wrb = queue_head_node(txq);
418 wrb_fill(wrb, busaddr, frag->size);
419 be_dws_cpu_to_le(wrb, sizeof(*wrb));
420 queue_head_inc(txq);
421 copied += frag->size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700422 }
423
424 if (dummy_wrb) {
425 wrb = queue_head_node(txq);
426 wrb_fill(wrb, 0, 0);
427 be_dws_cpu_to_le(wrb, sizeof(*wrb));
428 queue_head_inc(txq);
429 }
430
431 wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
432 wrb_cnt, copied);
433 be_dws_cpu_to_le(hdr, sizeof(*hdr));
434
435 return copied;
436}
437
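/* ndo_start_xmit handler: post the skb's WRBs, stop the queue early if the
 * ring cannot hold another maximum-sized request, then ring the TX doorbell.
 * On a mapping failure the ring head is rolled back and the skb is dropped.
 */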
Stephen Hemminger613573252009-08-31 19:50:58 +0000438static netdev_tx_t be_xmit(struct sk_buff *skb,
Sathya Perlab31c50a2009-09-17 10:30:13 -0700439 struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700440{
441 struct be_adapter *adapter = netdev_priv(netdev);
442 struct be_tx_obj *tx_obj = &adapter->tx_obj;
443 struct be_queue_info *txq = &tx_obj->q;
444 u32 wrb_cnt = 0, copied = 0;
445 u32 start = txq->head;
446 bool dummy_wrb, stopped = false;
447
448 wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);
449
450 copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000451 if (copied) {
452 /* record the sent skb in the sent_skb table */
453 BUG_ON(tx_obj->sent_skb_list[start]);
454 tx_obj->sent_skb_list[start] = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700455
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000456 /* Ensure txq has space for the next skb; Else stop the queue
 457 * *BEFORE* ringing the tx doorbell, so that we serialize the
458 * tx compls of the current transmit which'll wake up the queue
459 */
460 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
461 txq->len) {
462 netif_stop_queue(netdev);
463 stopped = true;
464 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700465
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000466 be_txq_notify(adapter, txq->id, wrb_cnt);
467
468 be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
469 } else {
470 txq->head = start;
471 dev_kfree_skb_any(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700472 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700473 return NETDEV_TX_OK;
474}
475
476static int be_change_mtu(struct net_device *netdev, int new_mtu)
477{
478 struct be_adapter *adapter = netdev_priv(netdev);
479 if (new_mtu < BE_MIN_MTU ||
480 new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
481 dev_info(&adapter->pdev->dev,
482 "MTU must be between %d and %d bytes\n",
483 BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
484 return -EINVAL;
485 }
486 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
487 netdev->mtu, new_mtu);
488 netdev->mtu = new_mtu;
489 return 0;
490}
491
492/*
 493 * if there are BE_NUM_VLANS_SUPPORTED or fewer VLANs configured,
494 * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured,
495 * set the BE in promiscuous VLAN mode.
496 */
Sathya Perlab31c50a2009-09-17 10:30:13 -0700497static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700498{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700499 u16 vtag[BE_NUM_VLANS_SUPPORTED];
500 u16 ntags = 0, i;
Sathya Perlab31c50a2009-09-17 10:30:13 -0700501 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700502
503 if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) {
504 /* Construct VLAN Table to give to HW */
505 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
506 if (adapter->vlan_tag[i]) {
507 vtag[ntags] = cpu_to_le16(i);
508 ntags++;
509 }
510 }
Sathya Perlab31c50a2009-09-17 10:30:13 -0700511 status = be_cmd_vlan_config(adapter, adapter->if_handle,
512 vtag, ntags, 1, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700513 } else {
Sathya Perlab31c50a2009-09-17 10:30:13 -0700514 status = be_cmd_vlan_config(adapter, adapter->if_handle,
515 NULL, 0, 1, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700516 }
Sathya Perlab31c50a2009-09-17 10:30:13 -0700517 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700518}
519
520static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
521{
522 struct be_adapter *adapter = netdev_priv(netdev);
523 struct be_eq_obj *rx_eq = &adapter->rx_eq;
524 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700525
Sathya Perla8788fdc2009-07-27 22:52:03 +0000526 be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
527 be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700528 adapter->vlan_grp = grp;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000529 be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
530 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700531}
532
533static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
534{
535 struct be_adapter *adapter = netdev_priv(netdev);
536
537 adapter->num_vlans++;
538 adapter->vlan_tag[vid] = 1;
539
Sathya Perlab31c50a2009-09-17 10:30:13 -0700540 be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700541}
542
543static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
544{
545 struct be_adapter *adapter = netdev_priv(netdev);
546
547 adapter->num_vlans--;
548 adapter->vlan_tag[vid] = 0;
549
550 vlan_group_set_device(adapter->vlan_grp, vid, NULL);
Sathya Perlab31c50a2009-09-17 10:30:13 -0700551 be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700552}
553
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700554static void be_set_multicast_list(struct net_device *netdev)
555{
556 struct be_adapter *adapter = netdev_priv(netdev);
557
558 if (netdev->flags & IFF_PROMISC) {
Sathya Perla8788fdc2009-07-27 22:52:03 +0000559 be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
Sathya Perla24307ee2009-06-18 00:09:25 +0000560 adapter->promiscuous = true;
561 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700562 }
Sathya Perla24307ee2009-06-18 00:09:25 +0000563
 564 /* BE was previously in promiscuous mode; disable it */
565 if (adapter->promiscuous) {
566 adapter->promiscuous = false;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000567 be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
Sathya Perla24307ee2009-06-18 00:09:25 +0000568 }
569
Sathya Perlae7b909a2009-11-22 22:01:10 +0000570 /* Enable multicast promisc if num configured exceeds what we support */
571 if (netdev->flags & IFF_ALLMULTI || netdev->mc_count > BE_MAX_MC) {
572 be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0,
573 &adapter->mc_cmd_mem);
Sathya Perla24307ee2009-06-18 00:09:25 +0000574 goto done;
575 }
576
Sathya Perla8788fdc2009-07-27 22:52:03 +0000577 be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list,
Sathya Perlae7b909a2009-11-22 22:01:10 +0000578 netdev->mc_count, &adapter->mc_cmd_mem);
Sathya Perla24307ee2009-06-18 00:09:25 +0000579done:
580 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700581}
582
Sathya Perla4097f662009-03-24 16:40:13 -0700583static void be_rx_rate_update(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700584{
Sathya Perla4097f662009-03-24 16:40:13 -0700585 struct be_drvr_stats *stats = drvr_stats(adapter);
586 ulong now = jiffies;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700587
Sathya Perla4097f662009-03-24 16:40:13 -0700588 /* Wrapped around */
589 if (time_before(now, stats->be_rx_jiffies)) {
590 stats->be_rx_jiffies = now;
591 return;
592 }
593
594 /* Update the rate once in two seconds */
595 if ((now - stats->be_rx_jiffies) < 2 * HZ)
596 return;
597
Stephen Hemminger65f71b82009-03-27 00:25:24 -0700598 stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
599 - stats->be_rx_bytes_prev,
600 now - stats->be_rx_jiffies);
Sathya Perla4097f662009-03-24 16:40:13 -0700601 stats->be_rx_jiffies = now;
602 stats->be_rx_bytes_prev = stats->be_rx_bytes;
603}
604
605static void be_rx_stats_update(struct be_adapter *adapter,
606 u32 pktsize, u16 numfrags)
607{
608 struct be_drvr_stats *stats = drvr_stats(adapter);
609
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700610 stats->be_rx_compl++;
611 stats->be_rx_frags += numfrags;
612 stats->be_rx_bytes += pktsize;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700613}
614
Ajit Khaparde728a9972009-04-13 15:41:22 -0700615static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
616{
617 u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
618
619 l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
620 ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
621 ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
622 if (ip_version) {
623 tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
624 udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
625 }
626 ipv6_chk = (ip_version && (tcpf || udpf));
627
628 return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
629}
630
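/* Look up the page_info for a completed RX fragment; the backing page is
 * unmapped when its last user is freed, and the consumed RX queue entry is
 * accounted for.
 */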
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700631static struct be_rx_page_info *
632get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
633{
634 struct be_rx_page_info *rx_page_info;
635 struct be_queue_info *rxq = &adapter->rx_obj.q;
636
637 rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
638 BUG_ON(!rx_page_info->page);
639
640 if (rx_page_info->last_page_user)
641 pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
642 adapter->big_page_size, PCI_DMA_FROMDEVICE);
643
644 atomic_dec(&rxq->used);
645 return rx_page_info;
646}
647
 648/* Throw away the data in the Rx completion */
649static void be_rx_compl_discard(struct be_adapter *adapter,
650 struct be_eth_rx_compl *rxcp)
651{
652 struct be_queue_info *rxq = &adapter->rx_obj.q;
653 struct be_rx_page_info *page_info;
654 u16 rxq_idx, i, num_rcvd;
655
656 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
657 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
658
659 for (i = 0; i < num_rcvd; i++) {
660 page_info = get_rx_page_info(adapter, rxq_idx);
661 put_page(page_info->page);
662 memset(page_info, 0, sizeof(*page_info));
663 index_inc(&rxq_idx, rxq->len);
664 }
665}
666
667/*
668 * skb_fill_rx_data forms a complete skb for an ether frame
669 * indicated by rxcp.
670 */
671static void skb_fill_rx_data(struct be_adapter *adapter,
672 struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
673{
674 struct be_queue_info *rxq = &adapter->rx_obj.q;
675 struct be_rx_page_info *page_info;
Ajit Khapardebd46cb62009-06-26 02:51:07 +0000676 u16 rxq_idx, i, num_rcvd, j;
Ajit Khapardefa774062009-07-22 09:28:55 -0700677 u32 pktsize, hdr_len, curr_frag_len, size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678 u8 *start;
679
680 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
681 pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
682 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
683
684 page_info = get_rx_page_info(adapter, rxq_idx);
685
686 start = page_address(page_info->page) + page_info->page_offset;
687 prefetch(start);
688
689 /* Copy data in the first descriptor of this completion */
690 curr_frag_len = min(pktsize, rx_frag_size);
691
692 /* Copy the header portion into skb_data */
693 hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
694 memcpy(skb->data, start, hdr_len);
695 skb->len = curr_frag_len;
696 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
697 /* Complete packet has now been moved to data */
698 put_page(page_info->page);
699 skb->data_len = 0;
700 skb->tail += curr_frag_len;
701 } else {
702 skb_shinfo(skb)->nr_frags = 1;
703 skb_shinfo(skb)->frags[0].page = page_info->page;
704 skb_shinfo(skb)->frags[0].page_offset =
705 page_info->page_offset + hdr_len;
706 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
707 skb->data_len = curr_frag_len - hdr_len;
708 skb->tail += hdr_len;
709 }
710 memset(page_info, 0, sizeof(*page_info));
711
712 if (pktsize <= rx_frag_size) {
713 BUG_ON(num_rcvd != 1);
Sathya Perla76fbb422009-06-10 02:21:56 +0000714 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700715 }
716
717 /* More frags present for this completion */
Ajit Khapardefa774062009-07-22 09:28:55 -0700718 size = pktsize;
Ajit Khapardebd46cb62009-06-26 02:51:07 +0000719 for (i = 1, j = 0; i < num_rcvd; i++) {
Ajit Khapardefa774062009-07-22 09:28:55 -0700720 size -= curr_frag_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700721 index_inc(&rxq_idx, rxq->len);
722 page_info = get_rx_page_info(adapter, rxq_idx);
723
Ajit Khapardefa774062009-07-22 09:28:55 -0700724 curr_frag_len = min(size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700725
Ajit Khapardebd46cb62009-06-26 02:51:07 +0000726 /* Coalesce all frags from the same physical page in one slot */
727 if (page_info->page_offset == 0) {
728 /* Fresh page */
729 j++;
730 skb_shinfo(skb)->frags[j].page = page_info->page;
731 skb_shinfo(skb)->frags[j].page_offset =
732 page_info->page_offset;
733 skb_shinfo(skb)->frags[j].size = 0;
734 skb_shinfo(skb)->nr_frags++;
735 } else {
736 put_page(page_info->page);
737 }
738
739 skb_shinfo(skb)->frags[j].size += curr_frag_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700740 skb->len += curr_frag_len;
741 skb->data_len += curr_frag_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700742
743 memset(page_info, 0, sizeof(*page_info));
744 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +0000745 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700746
Sathya Perla76fbb422009-06-10 02:21:56 +0000747done:
Sathya Perla4097f662009-03-24 16:40:13 -0700748 be_rx_stats_update(adapter, pktsize, num_rcvd);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700749 return;
750}
751
Ajit Khaparde5be93b92009-07-21 12:36:19 -0700752/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700753static void be_rx_compl_process(struct be_adapter *adapter,
754 struct be_eth_rx_compl *rxcp)
755{
756 struct sk_buff *skb;
Ajit Khapardedcb9b562009-09-30 21:58:22 -0700757 u32 vlanf, vid;
758 u8 vtm;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700759
Ajit Khapardedcb9b562009-09-30 21:58:22 -0700760 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
761 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
762
763 /* vlanf could be wrongly set in some cards.
764 * ignore if vtm is not set */
765 if ((adapter->cap == 0x400) && !vtm)
766 vlanf = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700767
Eric Dumazet89d71a62009-10-13 05:34:20 +0000768 skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700769 if (!skb) {
770 if (net_ratelimit())
771 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
772 be_rx_compl_discard(adapter, rxcp);
773 return;
774 }
775
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700776 skb_fill_rx_data(adapter, skb, rxcp);
777
Ajit Khaparde728a9972009-04-13 15:41:22 -0700778 if (do_pkt_csum(rxcp, adapter->rx_csum))
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700779 skb->ip_summed = CHECKSUM_NONE;
Ajit Khaparde728a9972009-04-13 15:41:22 -0700780 else
781 skb->ip_summed = CHECKSUM_UNNECESSARY;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700782
783 skb->truesize = skb->len + sizeof(struct sk_buff);
784 skb->protocol = eth_type_trans(skb, adapter->netdev);
785 skb->dev = adapter->netdev;
786
Ajit Khapardedcb9b562009-09-30 21:58:22 -0700787 if (vlanf) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700788 if (!adapter->vlan_grp || adapter->num_vlans == 0) {
789 kfree_skb(skb);
790 return;
791 }
792 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
793 vid = be16_to_cpu(vid);
794 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
795 } else {
796 netif_receive_skb(skb);
797 }
798
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700799 return;
800}
801
Ajit Khaparde5be93b92009-07-21 12:36:19 -0700802/* Process the RX completion indicated by rxcp when GRO is enabled */
803static void be_rx_compl_process_gro(struct be_adapter *adapter,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700804 struct be_eth_rx_compl *rxcp)
805{
806 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -0700807 struct sk_buff *skb = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700808 struct be_queue_info *rxq = &adapter->rx_obj.q;
Ajit Khaparde5be93b92009-07-21 12:36:19 -0700809 struct be_eq_obj *eq_obj = &adapter->rx_eq;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700810 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
Ajit Khapardebd46cb62009-06-26 02:51:07 +0000811 u16 i, rxq_idx = 0, vid, j;
Ajit Khapardedcb9b562009-09-30 21:58:22 -0700812 u8 vtm;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700813
814 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
815 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
816 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
817 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
Ajit Khapardedcb9b562009-09-30 21:58:22 -0700818 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
819
820 /* vlanf could be wrongly set in some cards.
821 * ignore if vtm is not set */
822 if ((adapter->cap == 0x400) && !vtm)
823 vlanf = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700824
Ajit Khaparde5be93b92009-07-21 12:36:19 -0700825 skb = napi_get_frags(&eq_obj->napi);
826 if (!skb) {
827 be_rx_compl_discard(adapter, rxcp);
828 return;
829 }
830
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700831 remaining = pkt_size;
Ajit Khapardebd46cb62009-06-26 02:51:07 +0000832 for (i = 0, j = -1; i < num_rcvd; i++) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700833 page_info = get_rx_page_info(adapter, rxq_idx);
834
835 curr_frag_len = min(remaining, rx_frag_size);
836
Ajit Khapardebd46cb62009-06-26 02:51:07 +0000837 /* Coalesce all frags from the same physical page in one slot */
838 if (i == 0 || page_info->page_offset == 0) {
839 /* First frag or Fresh page */
840 j++;
Ajit Khaparde5be93b92009-07-21 12:36:19 -0700841 skb_shinfo(skb)->frags[j].page = page_info->page;
842 skb_shinfo(skb)->frags[j].page_offset =
843 page_info->page_offset;
844 skb_shinfo(skb)->frags[j].size = 0;
Ajit Khapardebd46cb62009-06-26 02:51:07 +0000845 } else {
846 put_page(page_info->page);
847 }
Ajit Khaparde5be93b92009-07-21 12:36:19 -0700848 skb_shinfo(skb)->frags[j].size += curr_frag_len;
Ajit Khapardebd46cb62009-06-26 02:51:07 +0000849
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700850 remaining -= curr_frag_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700851 index_inc(&rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700852 memset(page_info, 0, sizeof(*page_info));
853 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +0000854 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700855
Ajit Khaparde5be93b92009-07-21 12:36:19 -0700856 skb_shinfo(skb)->nr_frags = j + 1;
857 skb->len = pkt_size;
858 skb->data_len = pkt_size;
859 skb->truesize += pkt_size;
860 skb->ip_summed = CHECKSUM_UNNECESSARY;
861
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700862 if (likely(!vlanf)) {
Ajit Khaparde5be93b92009-07-21 12:36:19 -0700863 napi_gro_frags(&eq_obj->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700864 } else {
865 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
866 vid = be16_to_cpu(vid);
867
868 if (!adapter->vlan_grp || adapter->num_vlans == 0)
869 return;
870
Ajit Khaparde5be93b92009-07-21 12:36:19 -0700871 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700872 }
873
Sathya Perla4097f662009-03-24 16:40:13 -0700874 be_rx_stats_update(adapter, pkt_size, num_rcvd);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700875 return;
876}
877
878static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
879{
880 struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);
881
882 if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
883 return NULL;
884
885 be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
886
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700887 queue_tail_inc(&adapter->rx_obj.cq);
888 return rxcp;
889}
890
Sathya Perlaa7a0ef32009-06-10 02:23:28 +0000891/* To reset the valid bit, we need to reset the whole word as
892 * when walking the queue the valid entries are little-endian
893 * and invalid entries are host endian
894 */
895static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
896{
897 rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
898}
899
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700900static inline struct page *be_alloc_pages(u32 size)
901{
902 gfp_t alloc_flags = GFP_ATOMIC;
903 u32 order = get_order(size);
904 if (order > 0)
905 alloc_flags |= __GFP_COMP;
906 return alloc_pages(alloc_flags, order);
907}
908
909/*
910 * Allocate a page, split it to fragments of size rx_frag_size and post as
911 * receive buffers to BE
912 */
913static void be_post_rx_frags(struct be_adapter *adapter)
914{
915 struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
916 struct be_rx_page_info *page_info = NULL;
917 struct be_queue_info *rxq = &adapter->rx_obj.q;
918 struct page *pagep = NULL;
919 struct be_eth_rx_d *rxd;
920 u64 page_dmaaddr = 0, frag_dmaaddr;
921 u32 posted, page_offset = 0;
922
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700923 page_info = &page_info_tbl[rxq->head];
924 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
925 if (!pagep) {
926 pagep = be_alloc_pages(adapter->big_page_size);
927 if (unlikely(!pagep)) {
928 drvr_stats(adapter)->be_ethrx_post_fail++;
929 break;
930 }
931 page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
932 adapter->big_page_size,
933 PCI_DMA_FROMDEVICE);
934 page_info->page_offset = 0;
935 } else {
936 get_page(pagep);
937 page_info->page_offset = page_offset + rx_frag_size;
938 }
939 page_offset = page_info->page_offset;
940 page_info->page = pagep;
941 pci_unmap_addr_set(page_info, bus, page_dmaaddr);
942 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
943
944 rxd = queue_head_node(rxq);
945 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
946 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
947 queue_head_inc(rxq);
948
949 /* Any space left in the current big page for another frag? */
950 if ((page_offset + rx_frag_size + rx_frag_size) >
951 adapter->big_page_size) {
952 pagep = NULL;
953 page_info->last_page_user = true;
954 }
955 page_info = &page_info_tbl[rxq->head];
956 }
957 if (pagep)
958 page_info->last_page_user = true;
959
960 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700961 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +0000962 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -0700963 } else if (atomic_read(&rxq->used) == 0) {
964 /* Let be_worker replenish when memory is available */
965 adapter->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700966 }
967
968 return;
969}
970
Sathya Perla5fb379e2009-06-18 00:02:59 +0000971static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700972{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700973 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
974
975 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
976 return NULL;
977
978 be_dws_le_to_cpu(txcp, sizeof(*txcp));
979
980 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
981
982 queue_tail_inc(tx_cq);
983 return txcp;
984}
985
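/* Reclaim the WRBs of one completed TX request: walk the TX ring tail up to
 * 'last_index', release the ring entries, then unmap and free the sent skb.
 */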
986static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
987{
988 struct be_queue_info *txq = &adapter->tx_obj.q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700989 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
990 struct sk_buff *sent_skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700991 u16 cur_index, num_wrbs = 0;
992
993 cur_index = txq->tail;
994 sent_skb = sent_skbs[cur_index];
995 BUG_ON(!sent_skb);
996 sent_skbs[cur_index] = NULL;
997
998 do {
999 cur_index = txq->tail;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001000 num_wrbs++;
1001 queue_tail_inc(txq);
1002 } while (cur_index != last_index);
1003
1004 atomic_sub(num_wrbs, &txq->used);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001005 skb_dma_unmap(&adapter->pdev->dev, sent_skb, DMA_TO_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001006 kfree_skb(sent_skb);
1007}
1008
Sathya Perla859b1e42009-08-10 03:43:51 +00001009static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1010{
1011 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1012
1013 if (!eqe->evt)
1014 return NULL;
1015
1016 eqe->evt = le32_to_cpu(eqe->evt);
1017 queue_tail_inc(&eq_obj->q);
1018 return eqe;
1019}
1020
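/* Drain all pending entries from the event queue, re-arm it (also covering
 * spurious interrupts that arrive without events) and schedule NAPI if any
 * events were found.
 */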
1021static int event_handle(struct be_adapter *adapter,
1022 struct be_eq_obj *eq_obj)
1023{
1024 struct be_eq_entry *eqe;
1025 u16 num = 0;
1026
1027 while ((eqe = event_get(eq_obj)) != NULL) {
1028 eqe->evt = 0;
1029 num++;
1030 }
1031
1032 /* Deal with any spurious interrupts that come
1033 * without events
1034 */
1035 be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1036 if (num)
1037 napi_schedule(&eq_obj->napi);
1038
1039 return num;
1040}
1041
1042/* Just read and notify events without processing them.
1043 * Used at the time of destroying event queues */
1044static void be_eq_clean(struct be_adapter *adapter,
1045 struct be_eq_obj *eq_obj)
1046{
1047 struct be_eq_entry *eqe;
1048 u16 num = 0;
1049
1050 while ((eqe = event_get(eq_obj)) != NULL) {
1051 eqe->evt = 0;
1052 num++;
1053 }
1054
1055 if (num)
1056 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1057}
1058
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001059static void be_rx_q_clean(struct be_adapter *adapter)
1060{
1061 struct be_rx_page_info *page_info;
1062 struct be_queue_info *rxq = &adapter->rx_obj.q;
1063 struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
1064 struct be_eth_rx_compl *rxcp;
1065 u16 tail;
1066
1067 /* First cleanup pending rx completions */
1068 while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
1069 be_rx_compl_discard(adapter, rxcp);
Sathya Perlaa7a0ef32009-06-10 02:23:28 +00001070 be_rx_compl_reset(rxcp);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001071 be_cq_notify(adapter, rx_cq->id, true, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001072 }
1073
 1074 /* Then free posted rx buffers that were not used */
1075 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
Sathya Perlacdab23b2009-08-10 03:43:23 +00001076 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001077 page_info = get_rx_page_info(adapter, tail);
1078 put_page(page_info->page);
1079 memset(page_info, 0, sizeof(*page_info));
1080 }
1081 BUG_ON(atomic_read(&rxq->used));
1082}
1083
Sathya Perlaa8e91792009-08-10 03:42:43 +00001084static void be_tx_compl_clean(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001085{
Sathya Perlaa8e91792009-08-10 03:42:43 +00001086 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001087 struct be_queue_info *txq = &adapter->tx_obj.q;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001088 struct be_eth_tx_compl *txcp;
1089 u16 end_idx, cmpl = 0, timeo = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001090
Sathya Perlaa8e91792009-08-10 03:42:43 +00001091 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1092 do {
1093 while ((txcp = be_tx_compl_get(tx_cq))) {
1094 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1095 wrb_index, txcp);
1096 be_tx_compl_process(adapter, end_idx);
1097 cmpl++;
1098 }
1099 if (cmpl) {
1100 be_cq_notify(adapter, tx_cq->id, false, cmpl);
1101 cmpl = 0;
1102 }
1103
1104 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1105 break;
1106
1107 mdelay(1);
1108 } while (true);
1109
1110 if (atomic_read(&txq->used))
1111 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1112 atomic_read(&txq->used));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001113}
1114
Sathya Perla5fb379e2009-06-18 00:02:59 +00001115static void be_mcc_queues_destroy(struct be_adapter *adapter)
1116{
1117 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001118
Sathya Perla8788fdc2009-07-27 22:52:03 +00001119 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001120 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001121 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001122 be_queue_free(adapter, q);
1123
Sathya Perla8788fdc2009-07-27 22:52:03 +00001124 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001125 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001126 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001127 be_queue_free(adapter, q);
1128}
1129
1130/* Must be called only after TX qs are created as MCC shares TX EQ */
1131static int be_mcc_queues_create(struct be_adapter *adapter)
1132{
1133 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001134
1135 /* Alloc MCC compl queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001136 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001137 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001138 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001139 goto err;
1140
1141 /* Ask BE to create MCC compl queue; share TX's eq */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001142 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001143 goto mcc_cq_free;
1144
1145 /* Alloc MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001146 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001147 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1148 goto mcc_cq_destroy;
1149
1150 /* Ask BE to create MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001151 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001152 goto mcc_q_free;
1153
1154 return 0;
1155
1156mcc_q_free:
1157 be_queue_free(adapter, q);
1158mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001159 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001160mcc_cq_free:
1161 be_queue_free(adapter, cq);
1162err:
1163 return -1;
1164}
1165
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001166static void be_tx_queues_destroy(struct be_adapter *adapter)
1167{
1168 struct be_queue_info *q;
1169
1170 q = &adapter->tx_obj.q;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001171 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001172 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001173 be_queue_free(adapter, q);
1174
1175 q = &adapter->tx_obj.cq;
1176 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001177 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001178 be_queue_free(adapter, q);
1179
Sathya Perla859b1e42009-08-10 03:43:51 +00001180 /* Clear any residual events */
1181 be_eq_clean(adapter, &adapter->tx_eq);
1182
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001183 q = &adapter->tx_eq.q;
1184 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001185 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001186 be_queue_free(adapter, q);
1187}
1188
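/* Create the TX event queue, TX completion queue and TX ring in that order,
 * unwinding whatever was created if any step fails.
 */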
1189static int be_tx_queues_create(struct be_adapter *adapter)
1190{
1191 struct be_queue_info *eq, *q, *cq;
1192
1193 adapter->tx_eq.max_eqd = 0;
1194 adapter->tx_eq.min_eqd = 0;
1195 adapter->tx_eq.cur_eqd = 96;
1196 adapter->tx_eq.enable_aic = false;
1197 /* Alloc Tx Event queue */
1198 eq = &adapter->tx_eq.q;
1199 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1200 return -1;
1201
1202 /* Ask BE to create Tx Event queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001203 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001204 goto tx_eq_free;
1205 /* Alloc TX eth compl queue */
1206 cq = &adapter->tx_obj.cq;
1207 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1208 sizeof(struct be_eth_tx_compl)))
1209 goto tx_eq_destroy;
1210
1211 /* Ask BE to create Tx eth compl queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001212 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001213 goto tx_cq_free;
1214
1215 /* Alloc TX eth queue */
1216 q = &adapter->tx_obj.q;
1217 if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1218 goto tx_cq_destroy;
1219
1220 /* Ask BE to create Tx eth queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001221 if (be_cmd_txq_create(adapter, q, cq))
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001222 goto tx_q_free;
1223 return 0;
1224
1225tx_q_free:
1226 be_queue_free(adapter, q);
1227tx_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001228 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001229tx_cq_free:
1230 be_queue_free(adapter, cq);
1231tx_eq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001232 be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001233tx_eq_free:
1234 be_queue_free(adapter, eq);
1235 return -1;
1236}
1237
1238static void be_rx_queues_destroy(struct be_adapter *adapter)
1239{
1240 struct be_queue_info *q;
1241
1242 q = &adapter->rx_obj.q;
1243 if (q->created) {
Sathya Perla8788fdc2009-07-27 22:52:03 +00001244 be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001245 be_rx_q_clean(adapter);
1246 }
1247 be_queue_free(adapter, q);
1248
1249 q = &adapter->rx_obj.cq;
1250 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001251 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001252 be_queue_free(adapter, q);
1253
Sathya Perla859b1e42009-08-10 03:43:51 +00001254 /* Clear any residual events */
1255 be_eq_clean(adapter, &adapter->rx_eq);
1256
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001257 q = &adapter->rx_eq.q;
1258 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001259 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001260 be_queue_free(adapter, q);
1261}
1262
1263static int be_rx_queues_create(struct be_adapter *adapter)
1264{
1265 struct be_queue_info *eq, *q, *cq;
1266 int rc;
1267
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001268 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1269 adapter->rx_eq.max_eqd = BE_MAX_EQD;
1270 adapter->rx_eq.min_eqd = 0;
1271 adapter->rx_eq.cur_eqd = 0;
1272 adapter->rx_eq.enable_aic = true;
1273
1274 /* Alloc Rx Event queue */
1275 eq = &adapter->rx_eq.q;
1276 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1277 sizeof(struct be_eq_entry));
1278 if (rc)
1279 return rc;
1280
1281 /* Ask BE to create Rx Event queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001282 rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001283 if (rc)
1284 goto rx_eq_free;
1285
1286 /* Alloc RX eth compl queue */
1287 cq = &adapter->rx_obj.cq;
1288 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1289 sizeof(struct be_eth_rx_compl));
1290 if (rc)
1291 goto rx_eq_destroy;
1292
1293 /* Ask BE to create Rx eth compl queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001294 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001295 if (rc)
1296 goto rx_cq_free;
1297
1298 /* Alloc RX eth queue */
1299 q = &adapter->rx_obj.q;
1300 rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
1301 if (rc)
1302 goto rx_cq_destroy;
1303
1304 /* Ask BE to create Rx eth queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001305 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001306 BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
1307 if (rc)
1308 goto rx_q_free;
1309
1310 return 0;
1311rx_q_free:
1312 be_queue_free(adapter, q);
1313rx_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001314 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001315rx_cq_free:
1316 be_queue_free(adapter, cq);
1317rx_eq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001318 be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001319rx_eq_free:
1320 be_queue_free(adapter, eq);
1321 return rc;
1322}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001323
Sathya Perlab628bde2009-08-17 00:58:26 +00001324/* There are 8 evt ids per func. Returns the evt id's bit number */
1325static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
1326{
1327 return eq_id - 8 * be_pci_func(adapter);
1328}
1329
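/* Legacy INTx interrupt handler: read this function's interrupt status
 * register and, if it is non-zero, service both the TX and RX event queues.
 */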
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001330static irqreturn_t be_intx(int irq, void *dev)
1331{
1332 struct be_adapter *adapter = dev;
Sathya Perla8788fdc2009-07-27 22:52:03 +00001333 int isr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001334
Sathya Perla8788fdc2009-07-27 22:52:03 +00001335 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
Sathya Perlaeec368f2009-07-27 22:52:23 +00001336 be_pci_func(adapter) * CEV_ISR_SIZE);
Sathya Perlac001c212009-07-01 01:06:07 +00001337 if (!isr)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001338 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001339
Sathya Perla8788fdc2009-07-27 22:52:03 +00001340 event_handle(adapter, &adapter->tx_eq);
1341 event_handle(adapter, &adapter->rx_eq);
Sathya Perlac001c212009-07-01 01:06:07 +00001342
Sathya Perla8788fdc2009-07-27 22:52:03 +00001343 return IRQ_HANDLED;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001344}
1345
1346static irqreturn_t be_msix_rx(int irq, void *dev)
1347{
1348 struct be_adapter *adapter = dev;
1349
Sathya Perla8788fdc2009-07-27 22:52:03 +00001350 event_handle(adapter, &adapter->rx_eq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001351
1352 return IRQ_HANDLED;
1353}
1354
Sathya Perla5fb379e2009-06-18 00:02:59 +00001355static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001356{
1357 struct be_adapter *adapter = dev;
1358
Sathya Perla8788fdc2009-07-27 22:52:03 +00001359 event_handle(adapter, &adapter->tx_eq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001360
1361 return IRQ_HANDLED;
1362}
1363
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001364static inline bool do_gro(struct be_adapter *adapter,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001365 struct be_eth_rx_compl *rxcp)
1366{
1367 int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1368 int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1369
1370 if (err)
1371 drvr_stats(adapter)->be_rxcp_err++;
1372
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001373 return (tcp_frame && !err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001374}
1375
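/* RX NAPI poll: process up to 'budget' RX completions via the GRO or regular
 * path, replenish the RX ring when it falls below the refill watermark, and
 * re-arm the completion queue only when all work is done.
 */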
1376int be_poll_rx(struct napi_struct *napi, int budget)
1377{
1378 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1379 struct be_adapter *adapter =
1380 container_of(rx_eq, struct be_adapter, rx_eq);
1381 struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
1382 struct be_eth_rx_compl *rxcp;
1383 u32 work_done;
1384
Ajit Khapardeb7b83ac2009-11-29 17:57:22 +00001385 adapter->stats.drvr_stats.be_rx_polls++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001386 for (work_done = 0; work_done < budget; work_done++) {
1387 rxcp = be_rx_compl_get(adapter);
1388 if (!rxcp)
1389 break;
1390
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001391 if (do_gro(adapter, rxcp))
1392 be_rx_compl_process_gro(adapter, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001393 else
1394 be_rx_compl_process(adapter, rxcp);
Sathya Perlaa7a0ef32009-06-10 02:23:28 +00001395
1396 be_rx_compl_reset(rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001397 }
1398
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001399 /* Refill the queue */
1400 if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
1401 be_post_rx_frags(adapter);
1402
1403 /* All consumed */
1404 if (work_done < budget) {
1405 napi_complete(napi);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001406 be_cq_notify(adapter, rx_cq->id, true, work_done);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001407 } else {
1408 /* More to be consumed; continue with interrupts disabled */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001409 be_cq_notify(adapter, rx_cq->id, false, work_done);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001410 }
1411 return work_done;
1412}
1413
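/* Reap tx completions, free the wrbs they cover and wake the netdev queue
 * if it had been stopped for lack of wrbs.
 */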
Sathya Perla5fb379e2009-06-18 00:02:59 +00001414void be_process_tx(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001415{
Sathya Perla5fb379e2009-06-18 00:02:59 +00001416 struct be_queue_info *txq = &adapter->tx_obj.q;
1417 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001418 struct be_eth_tx_compl *txcp;
1419 u32 num_cmpl = 0;
1420 u16 end_idx;
1421
Sathya Perla5fb379e2009-06-18 00:02:59 +00001422 while ((txcp = be_tx_compl_get(tx_cq))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001423 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1424 wrb_index, txcp);
1425 be_tx_compl_process(adapter, end_idx);
1426 num_cmpl++;
1427 }
1428
Sathya Perla5fb379e2009-06-18 00:02:59 +00001429 if (num_cmpl) {
Sathya Perla8788fdc2009-07-27 22:52:03 +00001430 be_cq_notify(adapter, tx_cq->id, true, num_cmpl);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001431
1432 /* As Tx wrbs have been freed up, wake up netdev queue if
1433 * it was stopped due to lack of tx wrbs.
1434 */
1435 if (netif_queue_stopped(adapter->netdev) &&
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001436 atomic_read(&txq->used) < txq->len / 2) {
Sathya Perla5fb379e2009-06-18 00:02:59 +00001437 netif_wake_queue(adapter->netdev);
1438 }
1439
1440 drvr_stats(adapter)->be_tx_events++;
1441 drvr_stats(adapter)->be_tx_compl += num_cmpl;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001442 }
Sathya Perla5fb379e2009-06-18 00:02:59 +00001443}
1444
 1445/* As TX and MCC share the same EQ, check for both TX and MCC completions.
1446 * For TX/MCC we don't honour budget; consume everything
1447 */
1448static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1449{
1450 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1451 struct be_adapter *adapter =
1452 container_of(tx_eq, struct be_adapter, tx_eq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001453
1454 napi_complete(napi);
1455
Sathya Perla5fb379e2009-06-18 00:02:59 +00001456 be_process_tx(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001457
Sathya Perla8788fdc2009-07-27 22:52:03 +00001458 be_process_mcc(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001459
1460 return 1;
1461}
1462
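/* Per-second worker: refresh HW stats, update the rx EQ delay and tx/rx
 * rates, and repost rx buffers if posting had starved.
 */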
Sathya Perlaea1dae12009-03-19 23:56:20 -07001463static void be_worker(struct work_struct *work)
1464{
1465 struct be_adapter *adapter =
1466 container_of(work, struct be_adapter, work.work);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001467
Sathya Perlab31c50a2009-09-17 10:30:13 -07001468 be_cmd_get_stats(adapter, &adapter->stats.cmd);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001469
1470 /* Set EQ delay */
1471 be_rx_eqd_update(adapter);
1472
Sathya Perla4097f662009-03-24 16:40:13 -07001473 be_tx_rate_update(adapter);
1474 be_rx_rate_update(adapter);
1475
Sathya Perlaea1dae12009-03-19 23:56:20 -07001476 if (adapter->rx_post_starved) {
1477 adapter->rx_post_starved = false;
1478 be_post_rx_frags(adapter);
1479 }
1480
1481 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1482}
1483
Sathya Perla8d56ff12009-11-22 22:02:26 +00001484static void be_msix_disable(struct be_adapter *adapter)
1485{
1486 if (adapter->msix_enabled) {
1487 pci_disable_msix(adapter->pdev);
1488 adapter->msix_enabled = false;
1489 }
1490}
1491
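/* Try to enable MSI-X with BE_NUM_MSIX_VECTORS vectors; if the request fails
 * the driver falls back to INTx when the irqs are registered.
 */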
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001492static void be_msix_enable(struct be_adapter *adapter)
1493{
1494 int i, status;
1495
1496 for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
1497 adapter->msix_entries[i].entry = i;
1498
1499 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1500 BE_NUM_MSIX_VECTORS);
1501 if (status == 0)
1502 adapter->msix_enabled = true;
1503 return;
1504}
1505
1506static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
1507{
Sathya Perlab628bde2009-08-17 00:58:26 +00001508 return adapter->msix_entries[
1509 be_evt_bit_get(adapter, eq_id)].vector;
1510}
1511
1512static int be_request_irq(struct be_adapter *adapter,
1513 struct be_eq_obj *eq_obj,
1514 void *handler, char *desc)
1515{
1516 struct net_device *netdev = adapter->netdev;
1517 int vec;
1518
1519 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1520 vec = be_msix_vec_get(adapter, eq_obj->q.id);
1521 return request_irq(vec, handler, 0, eq_obj->desc, adapter);
1522}
1523
1524static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
1525{
1526 int vec = be_msix_vec_get(adapter, eq_obj->q.id);
1527 free_irq(vec, adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001528}
1529
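/* Request the tx/mcc and rx MSI-X vectors; on failure release whatever was
 * acquired and drop back to INTx.
 */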
1530static int be_msix_register(struct be_adapter *adapter)
1531{
Sathya Perlab628bde2009-08-17 00:58:26 +00001532 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001533
Sathya Perlab628bde2009-08-17 00:58:26 +00001534 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx");
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001535 if (status)
1536 goto err;
1537
Sathya Perlab628bde2009-08-17 00:58:26 +00001538 status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx");
1539 if (status)
1540 goto free_tx_irq;
1541
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001542 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00001543
1544free_tx_irq:
1545 be_free_irq(adapter, &adapter->tx_eq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001546err:
1547 dev_warn(&adapter->pdev->dev,
1548 "MSIX Request IRQ failed - err %d\n", status);
1549 pci_disable_msix(adapter->pdev);
1550 adapter->msix_enabled = false;
1551 return status;
1552}
1553
1554static int be_irq_register(struct be_adapter *adapter)
1555{
1556 struct net_device *netdev = adapter->netdev;
1557 int status;
1558
1559 if (adapter->msix_enabled) {
1560 status = be_msix_register(adapter);
1561 if (status == 0)
1562 goto done;
1563 }
1564
1565 /* INTx */
1566 netdev->irq = adapter->pdev->irq;
1567 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
1568 adapter);
1569 if (status) {
1570 dev_err(&adapter->pdev->dev,
1571 "INTx request IRQ failed - err %d\n", status);
1572 return status;
1573 }
1574done:
1575 adapter->isr_registered = true;
1576 return 0;
1577}
1578
1579static void be_irq_unregister(struct be_adapter *adapter)
1580{
1581 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001582
1583 if (!adapter->isr_registered)
1584 return;
1585
1586 /* INTx */
1587 if (!adapter->msix_enabled) {
1588 free_irq(netdev->irq, adapter);
1589 goto done;
1590 }
1591
1592 /* MSIx */
Sathya Perlab628bde2009-08-17 00:58:26 +00001593 be_free_irq(adapter, &adapter->tx_eq);
1594 be_free_irq(adapter, &adapter->rx_eq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001595done:
1596 adapter->isr_registered = false;
1597 return;
1598}
1599
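/* ndo_open: post rx buffers, enable NAPI and interrupts, arm the event and
 * rx completion queues, then sync link state, vlan filtering and flow
 * control settings with the firmware.
 */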
1600static int be_open(struct net_device *netdev)
1601{
1602 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001603 struct be_eq_obj *rx_eq = &adapter->rx_eq;
1604 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perlaa8f447bd2009-06-18 00:10:27 +00001605 bool link_up;
1606 int status;
Sarveshwar Bandi0388f252009-10-28 04:15:20 -07001607 u8 mac_speed;
1608 u16 link_speed;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001609
1610 /* First time posting */
1611 be_post_rx_frags(adapter);
1612
1613 napi_enable(&rx_eq->napi);
1614 napi_enable(&tx_eq->napi);
1615
1616 be_irq_register(adapter);
1617
Sathya Perla8788fdc2009-07-27 22:52:03 +00001618 be_intr_set(adapter, true);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001619
1620 /* The evt queues are created in unarmed state; arm them */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001621 be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
1622 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001623
1624 /* Rx compl queue may be in unarmed state; rearm it */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001625 be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001626
Sarveshwar Bandi0388f252009-10-28 04:15:20 -07001627 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
1628 &link_speed);
Sathya Perlaa8f447bd2009-06-18 00:10:27 +00001629 if (status)
Ajit Khaparde4f2aa892009-11-06 02:07:32 +00001630 goto ret_sts;
Sathya Perlaa8f447bd2009-06-18 00:10:27 +00001631 be_link_status_update(adapter, link_up);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001632
Ajit Khaparde4f2aa892009-11-06 02:07:32 +00001633 status = be_vid_config(adapter);
1634 if (status)
1635 goto ret_sts;
1636
1637 status = be_cmd_set_flow_control(adapter,
1638 adapter->tx_fc, adapter->rx_fc);
1639 if (status)
1640 goto ret_sts;
1641
Sathya Perla5fb379e2009-06-18 00:02:59 +00001642 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Ajit Khaparde4f2aa892009-11-06 02:07:32 +00001643ret_sts:
1644 return status;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001645}
1646
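/* Create the firmware-side interface and its tx, rx and mcc queues,
 * unwinding everything created so far on any failure.
 */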
1647static int be_setup(struct be_adapter *adapter)
1648{
Sathya Perla5fb379e2009-06-18 00:02:59 +00001649 struct net_device *netdev = adapter->netdev;
Sathya Perla73d540f2009-10-14 20:20:42 +00001650 u32 cap_flags, en_flags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001651 int status;
1652
Sathya Perla73d540f2009-10-14 20:20:42 +00001653 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
1654 BE_IF_FLAGS_MCAST_PROMISCUOUS |
1655 BE_IF_FLAGS_PROMISCUOUS |
1656 BE_IF_FLAGS_PASS_L3L4_ERRORS;
1657 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
1658 BE_IF_FLAGS_PASS_L3L4_ERRORS;
1659
1660 status = be_cmd_if_create(adapter, cap_flags, en_flags,
1661 netdev->dev_addr, false/* pmac_invalid */,
1662 &adapter->if_handle, &adapter->pmac_id);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001663 if (status != 0)
1664 goto do_none;
1665
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001666 status = be_tx_queues_create(adapter);
1667 if (status != 0)
1668 goto if_destroy;
1669
1670 status = be_rx_queues_create(adapter);
1671 if (status != 0)
1672 goto tx_qs_destroy;
1673
Sathya Perla5fb379e2009-06-18 00:02:59 +00001674 status = be_mcc_queues_create(adapter);
1675 if (status != 0)
1676 goto rx_qs_destroy;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001677
Ajit Khaparde0dffc832009-11-29 17:57:46 +00001678 adapter->link_speed = -1;
1679
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001680 return 0;
1681
Sathya Perla5fb379e2009-06-18 00:02:59 +00001682rx_qs_destroy:
1683 be_rx_queues_destroy(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001684tx_qs_destroy:
1685 be_tx_queues_destroy(adapter);
1686if_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001687 be_cmd_if_destroy(adapter, adapter->if_handle);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001688do_none:
1689 return status;
1690}
1691
Sathya Perla5fb379e2009-06-18 00:02:59 +00001692static int be_clear(struct be_adapter *adapter)
1693{
Sathya Perla1a8887d2009-08-17 00:58:41 +00001694 be_mcc_queues_destroy(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001695 be_rx_queues_destroy(adapter);
1696 be_tx_queues_destroy(adapter);
1697
Sathya Perla8788fdc2009-07-27 22:52:03 +00001698 be_cmd_if_destroy(adapter, adapter->if_handle);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001699
Sathya Perla2243e2e2009-11-22 22:02:03 +00001700 /* tell fw we're done with firing cmds */
1701 be_cmd_fw_clean(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001702 return 0;
1703}
1704
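/* ndo_stop: cancel the worker, quiesce interrupts and NAPI, and drain
 * outstanding tx completions so all tx skbs are freed.
 */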
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001705static int be_close(struct net_device *netdev)
1706{
1707 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001708 struct be_eq_obj *rx_eq = &adapter->rx_eq;
1709 struct be_eq_obj *tx_eq = &adapter->tx_eq;
1710 int vec;
1711
Sathya Perlab305be72009-06-10 02:18:35 +00001712 cancel_delayed_work_sync(&adapter->work);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001713
1714 netif_stop_queue(netdev);
1715 netif_carrier_off(netdev);
Sathya Perlaa8f447bd2009-06-18 00:10:27 +00001716 adapter->link_up = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001717
Sathya Perla8788fdc2009-07-27 22:52:03 +00001718 be_intr_set(adapter, false);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001719
1720 if (adapter->msix_enabled) {
1721 vec = be_msix_vec_get(adapter, tx_eq->q.id);
1722 synchronize_irq(vec);
1723 vec = be_msix_vec_get(adapter, rx_eq->q.id);
1724 synchronize_irq(vec);
1725 } else {
1726 synchronize_irq(netdev->irq);
1727 }
1728 be_irq_unregister(adapter);
1729
1730 napi_disable(&rx_eq->napi);
1731 napi_disable(&tx_eq->napi);
1732
Sathya Perlaa8e91792009-08-10 03:42:43 +00001733 /* Wait for all pending tx completions to arrive so that
1734 * all tx skbs are freed.
1735 */
1736 be_tx_compl_clean(adapter);
1737
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001738 return 0;
1739}
1740
Ajit Khaparde84517482009-09-04 03:12:16 +00001741#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
1742char flash_cookie[2][16] = {"*** SE FLAS",
1743 "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08001744
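/* Compare the CRC already in flash with the CRC carried in the new image;
 * redboot is reflashed only when they differ.
 */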
1745static bool be_flash_redboot(struct be_adapter *adapter,
1746 const u8 *p)
1747{
1748 u32 crc_offset;
1749 u8 flashed_crc[4];
1750 int status;
1751 crc_offset = FLASH_REDBOOT_START + FLASH_REDBOOT_IMAGE_MAX_SIZE - 4
1752 + sizeof(struct flash_file_hdr) - 32*1024;
1753 p += crc_offset;
1754 status = be_cmd_get_flash_crc(adapter, flashed_crc);
1755 if (status) {
1756 dev_err(&adapter->pdev->dev,
1757 "could not get crc from flash, not flashing redboot\n");
1758 return false;
1759 }
1760
 1761	/* update redboot only if crc does not match */
1762 if (!memcmp(flashed_crc, p, 4))
1763 return false;
1764 else
1765 return true;
1766
1767}
1768
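/* Flash one image section from the firmware file, 32KB at a time:
 * intermediate chunks use the SAVE op and the final chunk the FLASH op.
 */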
Ajit Khaparde84517482009-09-04 03:12:16 +00001769static int be_flash_image(struct be_adapter *adapter,
1770 const struct firmware *fw,
1771 struct be_dma_mem *flash_cmd, u32 flash_type)
1772{
1773 int status;
1774 u32 flash_op, image_offset = 0, total_bytes, image_size = 0;
1775 int num_bytes;
1776 const u8 *p = fw->data;
1777 struct be_cmd_write_flashrom *req = flash_cmd->va;
1778
1779 switch (flash_type) {
1780 case FLASHROM_TYPE_ISCSI_ACTIVE:
1781 image_offset = FLASH_iSCSI_PRIMARY_IMAGE_START;
1782 image_size = FLASH_IMAGE_MAX_SIZE;
1783 break;
1784 case FLASHROM_TYPE_ISCSI_BACKUP:
1785 image_offset = FLASH_iSCSI_BACKUP_IMAGE_START;
1786 image_size = FLASH_IMAGE_MAX_SIZE;
1787 break;
1788 case FLASHROM_TYPE_FCOE_FW_ACTIVE:
1789 image_offset = FLASH_FCoE_PRIMARY_IMAGE_START;
1790 image_size = FLASH_IMAGE_MAX_SIZE;
1791 break;
1792 case FLASHROM_TYPE_FCOE_FW_BACKUP:
1793 image_offset = FLASH_FCoE_BACKUP_IMAGE_START;
1794 image_size = FLASH_IMAGE_MAX_SIZE;
1795 break;
1796 case FLASHROM_TYPE_BIOS:
1797 image_offset = FLASH_iSCSI_BIOS_START;
1798 image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
1799 break;
1800 case FLASHROM_TYPE_FCOE_BIOS:
1801 image_offset = FLASH_FCoE_BIOS_START;
1802 image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
1803 break;
1804 case FLASHROM_TYPE_PXE_BIOS:
1805 image_offset = FLASH_PXE_BIOS_START;
1806 image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
1807 break;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08001808 case FLASHROM_TYPE_REDBOOT:
1809 if (!be_flash_redboot(adapter, fw->data))
1810 return 0;
1811 image_offset = FLASH_REDBOOT_ISM_START;
1812 image_size = FLASH_REDBOOT_IMAGE_MAX_SIZE;
1813 break;
Ajit Khaparde84517482009-09-04 03:12:16 +00001814 default:
1815 return 0;
1816 }
1817
1818 p += sizeof(struct flash_file_hdr) + image_offset;
1819 if (p + image_size > fw->data + fw->size)
1820 return -1;
1821
1822 total_bytes = image_size;
1823
1824 while (total_bytes) {
1825 if (total_bytes > 32*1024)
1826 num_bytes = 32*1024;
1827 else
1828 num_bytes = total_bytes;
1829 total_bytes -= num_bytes;
1830
1831 if (!total_bytes)
1832 flash_op = FLASHROM_OPER_FLASH;
1833 else
1834 flash_op = FLASHROM_OPER_SAVE;
1835 memcpy(req->params.data_buf, p, num_bytes);
1836 p += num_bytes;
1837 status = be_cmd_write_flashrom(adapter, flash_cmd,
1838 flash_type, flash_op, num_bytes);
1839 if (status) {
1840 dev_err(&adapter->pdev->dev,
1841 "cmd to write to flash rom failed. type/op %d/%d\n",
1842 flash_type, flash_op);
1843 return -1;
1844 }
1845 yield();
1846 }
1847
1848 return 0;
1849}
1850
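/* ethtool flash entry point: validate the firmware file's header signature
 * and flash cookie, then write each supported image type to flash.
 */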
1851int be_load_fw(struct be_adapter *adapter, u8 *func)
1852{
1853 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
1854 const struct firmware *fw;
1855 struct flash_file_hdr *fhdr;
1856 struct flash_section_info *fsec = NULL;
1857 struct be_dma_mem flash_cmd;
1858 int status;
1859 const u8 *p;
1860 bool entry_found = false;
1861 int flash_type;
1862 char fw_ver[FW_VER_LEN];
1863 char fw_cfg;
1864
1865 status = be_cmd_get_fw_ver(adapter, fw_ver);
1866 if (status)
1867 return status;
1868
1869 fw_cfg = *(fw_ver + 2);
1870 if (fw_cfg == '0')
1871 fw_cfg = '1';
1872 strcpy(fw_file, func);
1873
1874 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
1875 if (status)
1876 goto fw_exit;
1877
1878 p = fw->data;
1879 fhdr = (struct flash_file_hdr *) p;
1880 if (memcmp(fhdr->sign, FW_FILE_HDR_SIGN, strlen(FW_FILE_HDR_SIGN))) {
1881 dev_err(&adapter->pdev->dev,
1882 "Firmware(%s) load error (signature did not match)\n",
1883 fw_file);
1884 status = -1;
1885 goto fw_exit;
1886 }
1887
1888 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
1889
1890 p += sizeof(struct flash_file_hdr);
1891 while (p < (fw->data + fw->size)) {
1892 fsec = (struct flash_section_info *)p;
1893 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie))) {
1894 entry_found = true;
1895 break;
1896 }
1897 p += 32;
1898 }
1899
1900 if (!entry_found) {
1901 status = -1;
1902 dev_err(&adapter->pdev->dev,
1903 "Flash cookie not found in firmware image\n");
1904 goto fw_exit;
1905 }
1906
1907 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
1908 flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
1909 &flash_cmd.dma);
1910 if (!flash_cmd.va) {
1911 status = -ENOMEM;
1912 dev_err(&adapter->pdev->dev,
1913 "Memory allocation failure while flashing\n");
1914 goto fw_exit;
1915 }
1916
1917 for (flash_type = FLASHROM_TYPE_ISCSI_ACTIVE;
1918 flash_type <= FLASHROM_TYPE_FCOE_FW_BACKUP; flash_type++) {
1919 status = be_flash_image(adapter, fw, &flash_cmd,
1920 flash_type);
1921 if (status)
1922 break;
1923 }
1924
1925 pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
1926 flash_cmd.dma);
1927 if (status) {
1928 dev_err(&adapter->pdev->dev, "Firmware load error\n");
1929 goto fw_exit;
1930 }
1931
 1932	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
1933
1934fw_exit:
1935 release_firmware(fw);
1936 return status;
1937}
1938
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001939static struct net_device_ops be_netdev_ops = {
1940 .ndo_open = be_open,
1941 .ndo_stop = be_close,
1942 .ndo_start_xmit = be_xmit,
1943 .ndo_get_stats = be_get_stats,
1944 .ndo_set_rx_mode = be_set_multicast_list,
1945 .ndo_set_mac_address = be_mac_addr_set,
1946 .ndo_change_mtu = be_change_mtu,
1947 .ndo_validate_addr = eth_validate_addr,
1948 .ndo_vlan_rx_register = be_vlan_register,
1949 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
1950 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
1951};
1952
1953static void be_netdev_init(struct net_device *netdev)
1954{
1955 struct be_adapter *adapter = netdev_priv(netdev);
1956
1957 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
Ajit Khaparde583e3f32009-10-05 02:22:19 +00001958 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
1959 NETIF_F_GRO;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001960
Ajit Khaparde51c59872009-11-29 17:54:54 +00001961 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
1962
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001963 netdev->flags |= IFF_MULTICAST;
1964
Ajit Khaparde728a9972009-04-13 15:41:22 -07001965 adapter->rx_csum = true;
1966
Ajit Khaparde9e90c962009-11-06 02:06:59 +00001967 /* Default settings for Rx and Tx flow control */
1968 adapter->rx_fc = true;
1969 adapter->tx_fc = true;
1970
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001971 netif_set_gso_max_size(netdev, 65535);
1972
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001973 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
1974
1975 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
1976
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001977 netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
1978 BE_NAPI_WEIGHT);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001979 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001980 BE_NAPI_WEIGHT);
1981
1982 netif_carrier_off(netdev);
1983 netif_stop_queue(netdev);
1984}
1985
1986static void be_unmap_pci_bars(struct be_adapter *adapter)
1987{
Sathya Perla8788fdc2009-07-27 22:52:03 +00001988 if (adapter->csr)
1989 iounmap(adapter->csr);
1990 if (adapter->db)
1991 iounmap(adapter->db);
1992 if (adapter->pcicfg)
1993 iounmap(adapter->pcicfg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001994}
1995
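/* Map the CSR (BAR 2), doorbell (BAR 4) and PCI config (BAR 1) windows;
 * any failure unmaps whatever was already mapped.
 */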
1996static int be_map_pci_bars(struct be_adapter *adapter)
1997{
1998 u8 __iomem *addr;
1999
2000 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2001 pci_resource_len(adapter->pdev, 2));
2002 if (addr == NULL)
2003 return -ENOMEM;
Sathya Perla8788fdc2009-07-27 22:52:03 +00002004 adapter->csr = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002005
2006 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
2007 128 * 1024);
2008 if (addr == NULL)
2009 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00002010 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002011
2012 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1),
2013 pci_resource_len(adapter->pdev, 1));
2014 if (addr == NULL)
2015 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00002016 adapter->pcicfg = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002017
2018 return 0;
2019pci_map_err:
2020 be_unmap_pci_bars(adapter);
2021 return -ENOMEM;
2022}
2023
2024
2025static void be_ctrl_cleanup(struct be_adapter *adapter)
2026{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002027 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002028
2029 be_unmap_pci_bars(adapter);
2030
2031 if (mem->va)
2032 pci_free_consistent(adapter->pdev, mem->size,
2033 mem->va, mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00002034
2035 mem = &adapter->mc_cmd_mem;
2036 if (mem->va)
2037 pci_free_consistent(adapter->pdev, mem->size,
2038 mem->va, mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002039}
2040
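/* Map the PCI BARs, allocate the 16-byte-aligned mailbox and the
 * multicast-config DMA buffers, and initialize the mbox/mcc locks.
 */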
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002041static int be_ctrl_init(struct be_adapter *adapter)
2042{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002043 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2044 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perlae7b909a2009-11-22 22:01:10 +00002045 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002046 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002047
2048 status = be_map_pci_bars(adapter);
2049 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00002050 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002051
2052 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2053 mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
2054 mbox_mem_alloc->size, &mbox_mem_alloc->dma);
2055 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00002056 status = -ENOMEM;
2057 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002058 }
Sathya Perlae7b909a2009-11-22 22:01:10 +00002059
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002060 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2061 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2062 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2063 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00002064
2065 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2066 mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
2067 &mc_cmd_mem->dma);
2068 if (mc_cmd_mem->va == NULL) {
2069 status = -ENOMEM;
2070 goto free_mbox;
2071 }
2072 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2073
Sathya Perla8788fdc2009-07-27 22:52:03 +00002074 spin_lock_init(&adapter->mbox_lock);
2075 spin_lock_init(&adapter->mcc_lock);
2076 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002077
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002078 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00002079
2080free_mbox:
2081 pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
2082 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2083
2084unmap_pci_bars:
2085 be_unmap_pci_bars(adapter);
2086
2087done:
2088 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002089}
2090
2091static void be_stats_cleanup(struct be_adapter *adapter)
2092{
2093 struct be_stats_obj *stats = &adapter->stats;
2094 struct be_dma_mem *cmd = &stats->cmd;
2095
2096 if (cmd->va)
2097 pci_free_consistent(adapter->pdev, cmd->size,
2098 cmd->va, cmd->dma);
2099}
2100
2101static int be_stats_init(struct be_adapter *adapter)
2102{
2103 struct be_stats_obj *stats = &adapter->stats;
2104 struct be_dma_mem *cmd = &stats->cmd;
2105
2106 cmd->size = sizeof(struct be_cmd_req_get_stats);
2107 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
2108 if (cmd->va == NULL)
2109 return -1;
2110 return 0;
2111}
2112
2113static void __devexit be_remove(struct pci_dev *pdev)
2114{
2115 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00002116
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002117 if (!adapter)
2118 return;
2119
2120 unregister_netdev(adapter->netdev);
2121
Sathya Perla5fb379e2009-06-18 00:02:59 +00002122 be_clear(adapter);
2123
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002124 be_stats_cleanup(adapter);
2125
2126 be_ctrl_cleanup(adapter);
2127
Sathya Perla8d56ff12009-11-22 22:02:26 +00002128 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002129
2130 pci_set_drvdata(pdev, NULL);
2131 pci_release_regions(pdev);
2132 pci_disable_device(pdev);
2133
2134 free_netdev(adapter->netdev);
2135}
2136
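/* Read the firmware version, port configuration and permanent MAC address
 * from the controller and cache them in the adapter and netdev.
 */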
Sathya Perla2243e2e2009-11-22 22:02:03 +00002137static int be_get_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002138{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002139 int status;
Sathya Perla2243e2e2009-11-22 22:02:03 +00002140 u8 mac[ETH_ALEN];
Sathya Perla43a04fdc2009-10-14 20:21:17 +00002141
Sathya Perla8788fdc2009-07-27 22:52:03 +00002142 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002143 if (status)
2144 return status;
2145
Ajit Khapardedcb9b562009-09-30 21:58:22 -07002146 status = be_cmd_query_fw_cfg(adapter,
2147 &adapter->port_num, &adapter->cap);
Sathya Perla2243e2e2009-11-22 22:02:03 +00002148 if (status)
2149 return status;
2150
2151 memset(mac, 0, ETH_ALEN);
2152 status = be_cmd_mac_addr_query(adapter, mac,
2153 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2154 if (status)
2155 return status;
Ajit Khapardeca9e4982009-11-29 17:56:26 +00002156
2157 if (!is_valid_ether_addr(mac))
2158 return -EADDRNOTAVAIL;
2159
Sathya Perla2243e2e2009-11-22 22:02:03 +00002160 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
Ajit Khaparde35a65282009-11-29 17:55:39 +00002161 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perla2243e2e2009-11-22 22:02:03 +00002162
2163 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002164}
2165
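/* PCI probe: enable the device and map its resources, bring the firmware to
 * a ready state (POST, fw_init, function reset), create the queues and
 * register the netdev.
 */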
2166static int __devinit be_probe(struct pci_dev *pdev,
2167 const struct pci_device_id *pdev_id)
2168{
2169 int status = 0;
2170 struct be_adapter *adapter;
2171 struct net_device *netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002172
2173 status = pci_enable_device(pdev);
2174 if (status)
2175 goto do_none;
2176
2177 status = pci_request_regions(pdev, DRV_NAME);
2178 if (status)
2179 goto disable_dev;
2180 pci_set_master(pdev);
2181
2182 netdev = alloc_etherdev(sizeof(struct be_adapter));
2183 if (netdev == NULL) {
2184 status = -ENOMEM;
2185 goto rel_reg;
2186 }
2187 adapter = netdev_priv(netdev);
2188 adapter->pdev = pdev;
2189 pci_set_drvdata(pdev, adapter);
2190 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00002191 be_netdev_init(netdev);
2192 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002193
2194 be_msix_enable(adapter);
2195
Yang Hongyange9304382009-04-13 14:40:14 -07002196 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002197 if (!status) {
2198 netdev->features |= NETIF_F_HIGHDMA;
2199 } else {
Yang Hongyange9304382009-04-13 14:40:14 -07002200 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002201 if (status) {
2202 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
2203 goto free_netdev;
2204 }
2205 }
2206
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002207 status = be_ctrl_init(adapter);
2208 if (status)
2209 goto free_netdev;
2210
Sathya Perla2243e2e2009-11-22 22:02:03 +00002211 /* sync up with fw's ready state */
2212 status = be_cmd_POST(adapter);
2213 if (status)
2214 goto ctrl_clean;
2215
2216 /* tell fw we're ready to fire cmds */
2217 status = be_cmd_fw_init(adapter);
2218 if (status)
2219 goto ctrl_clean;
2220
2221 status = be_cmd_reset_function(adapter);
2222 if (status)
2223 goto ctrl_clean;
2224
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002225 status = be_stats_init(adapter);
2226 if (status)
2227 goto ctrl_clean;
2228
Sathya Perla2243e2e2009-11-22 22:02:03 +00002229 status = be_get_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002230 if (status)
2231 goto stats_clean;
2232
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002233 INIT_DELAYED_WORK(&adapter->work, be_worker);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002234
Sathya Perla5fb379e2009-06-18 00:02:59 +00002235 status = be_setup(adapter);
2236 if (status)
2237 goto stats_clean;
Sathya Perla2243e2e2009-11-22 22:02:03 +00002238
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002239 status = register_netdev(netdev);
2240 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00002241 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002242
Ajit Khapardec4ca2372009-05-18 15:38:55 -07002243 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002244 return 0;
2245
Sathya Perla5fb379e2009-06-18 00:02:59 +00002246unsetup:
2247 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002248stats_clean:
2249 be_stats_cleanup(adapter);
2250ctrl_clean:
2251 be_ctrl_cleanup(adapter);
2252free_netdev:
Sathya Perla8d56ff12009-11-22 22:02:26 +00002253 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002254 free_netdev(adapter->netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00002255 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002256rel_reg:
2257 pci_release_regions(pdev);
2258disable_dev:
2259 pci_disable_device(pdev);
2260do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07002261 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002262 return status;
2263}
2264
2265static int be_suspend(struct pci_dev *pdev, pm_message_t state)
2266{
2267 struct be_adapter *adapter = pci_get_drvdata(pdev);
2268 struct net_device *netdev = adapter->netdev;
2269
2270 netif_device_detach(netdev);
2271 if (netif_running(netdev)) {
2272 rtnl_lock();
2273 be_close(netdev);
2274 rtnl_unlock();
2275 }
Ajit Khaparde9e90c962009-11-06 02:06:59 +00002276 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00002277 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002278
2279 pci_save_state(pdev);
2280 pci_disable_device(pdev);
2281 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2282 return 0;
2283}
2284
2285static int be_resume(struct pci_dev *pdev)
2286{
2287 int status = 0;
2288 struct be_adapter *adapter = pci_get_drvdata(pdev);
2289 struct net_device *netdev = adapter->netdev;
2290
2291 netif_device_detach(netdev);
2292
2293 status = pci_enable_device(pdev);
2294 if (status)
2295 return status;
2296
2297 pci_set_power_state(pdev, 0);
2298 pci_restore_state(pdev);
2299
Sathya Perla2243e2e2009-11-22 22:02:03 +00002300 /* tell fw we're ready to fire cmds */
2301 status = be_cmd_fw_init(adapter);
2302 if (status)
2303 return status;
2304
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00002305 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002306 if (netif_running(netdev)) {
2307 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002308 be_open(netdev);
2309 rtnl_unlock();
2310 }
2311 netif_device_attach(netdev);
2312 return 0;
2313}
2314
2315static struct pci_driver be_driver = {
2316 .name = DRV_NAME,
2317 .id_table = be_dev_ids,
2318 .probe = be_probe,
2319 .remove = be_remove,
2320 .suspend = be_suspend,
2321 .resume = be_resume
2322};
2323
2324static int __init be_init_module(void)
2325{
2326 if (rx_frag_size != 8192 && rx_frag_size != 4096
2327 && rx_frag_size != 2048) {
2328 printk(KERN_WARNING DRV_NAME
2329 " : Module param rx_frag_size must be 2048/4096/8192."
2330 " Using 2048\n");
2331 rx_frag_size = 2048;
2332 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002333
2334 return pci_register_driver(&be_driver);
2335}
2336module_init(be_init_module);
2337
2338static void __exit be_exit_module(void)
2339{
2340 pci_unregister_driver(&be_driver);
2341}
2342module_exit(be_exit_module);