/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include "ixgbe.h"
#include "ixgbe_phy.h"

#define IXGBE_X540_MAX_TX_QUEUES	128
#define IXGBE_X540_MAX_RX_QUEUES	128
#define IXGBE_X540_RAR_ENTRIES		128
#define IXGBE_X540_MC_TBL_SIZE		128
#define IXGBE_X540_VFT_TBL_SIZE		128

static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);

static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
{
	return ixgbe_media_type_copper;
}

static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	/* Call PHY identify routine to get the PHY type */
	ixgbe_identify_phy_generic(hw);

	mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
	mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
	mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
	mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
	mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);

	return 0;
}

/**
 * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilities
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: true if autonegotiation enabled
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 **/
static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
				     ixgbe_link_speed speed, bool autoneg,
				     bool autoneg_wait_to_complete)
{
	return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
					    autoneg_wait_to_complete);
}

/**
 * ixgbe_reset_hw_X540 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
 * reset.
 **/
static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
	ixgbe_link_speed link_speed;
	s32 status = 0;
	u32 ctrl;
	u32 ctrl_ext;
	u32 reset_bit;
	u32 i;
	u32 autoc;
	u32 autoc2;
	bool link_up = false;

	/* Call adapter stop to disable tx/rx and clear interrupts */
	hw->mac.ops.stop_adapter(hw);

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests before reset
	 */
	ixgbe_disable_pcie_master(hw);

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
	 * If link reset is used when link is up, it might reset the PHY when
	 * mng is using it.  If link is down or the flag to force full link
	 * reset is set, then perform link reset.
	 */
	if (hw->force_full_reset) {
		reset_bit = IXGBE_CTRL_LNK_RST;
	} else {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
		if (!link_up)
			reset_bit = IXGBE_CTRL_LNK_RST;
		else
			reset_bit = IXGBE_CTRL_RST;
	}

	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit));
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		udelay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & reset_bit))
			break;
	}
	if (ctrl & reset_bit) {
		status = IXGBE_ERR_RESET_FAILED;
		hw_dbg(hw, "Reset polling failed to complete.\n");
	}

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.  We use 1usec since that is
	 * what is needed for ixgbe_disable_pcie_master().  The second reset
	 * then clears out any effects of those events.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		udelay(1);
		goto mac_reset_top;
	}

	/* Clear PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	msleep(50);

	/* Set the Rx packet buffer size. */
	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store the original AUTOC/AUTOC2 values if they have not been
	 * stored off yet.  Otherwise restore the stored original
	 * values since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	if (hw->mac.orig_link_settings_stored == false) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.orig_link_settings_stored = true;
	} else {
		if (autoc != hw->mac.orig_autoc)
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
					IXGBE_AUTOC_AN_RESTART));

		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
				   IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = IXGBE_X540_RAR_ENTRIES;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

	return status;
}

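/*
 * Illustrative sketch (not part of the driver): callers normally reach the
 * reset above through the MAC ops table rather than by name.  The helper
 * below is hypothetical and only makes the LNK_RST-vs-RST selection and the
 * IXGBE_ERR_RESET_FAILED outcome concrete.
 *
 *	static s32 example_full_reset(struct ixgbe_hw *hw)
 *	{
 *		s32 err;
 *
 *		hw->force_full_reset = true;	// ask for IXGBE_CTRL_LNK_RST
 *		err = hw->mac.ops.reset_hw(hw);	// ixgbe_reset_hw_X540()
 *		if (err == IXGBE_ERR_RESET_FAILED)
 *			hw_dbg(hw, "reset bit did not self-clear\n");
 *		return err;
 *	}
 */
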
/**
 * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware using the generic start_hw function
 * and the generation-2 start_hw function.
 * Then performs revision-specific operations, if any.
 **/
static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
{
	s32 ret_val = 0;

	ret_val = ixgbe_start_hw_generic(hw);
	if (ret_val != 0)
		goto out;

	ret_val = ixgbe_start_hw_gen2(hw);

out:
	return ret_val;
}

/**
 * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
 * @hw: pointer to hardware structure
 *
 * Determines physical layer capabilities of the current configuration.
 **/
static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
{
	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u16 ext_ability = 0;

	hw->phy.ops.identify(hw);

	hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
			     &ext_ability);
	if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
		physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
	if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
		physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
	if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
		physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;

	return physical_layer;
}

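/*
 * Illustrative sketch (not part of the driver): the value returned above is
 * a bitmask, so a caller tests individual IXGBE_PHYSICAL_LAYER_* bits rather
 * than comparing for equality.  The helper name is hypothetical.
 *
 *	static bool example_supports_10gbase_t(struct ixgbe_hw *hw)
 *	{
 *		u32 layer = hw->mac.ops.get_supported_physical_layer(hw);
 *
 *		return !!(layer & IXGBE_PHYSICAL_LAYER_10GBASE_T);
 *	}
 */
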
/**
 * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
 * @hw: pointer to hardware structure
 *
 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
 * ixgbe_hw struct in order to set up EEPROM access.
 **/
static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	u32 eec;
	u16 eeprom_size;

	if (eeprom->type == ixgbe_eeprom_uninitialized) {
		eeprom->semaphore_delay = 10;
		eeprom->type = ixgbe_flash;

		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
		eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
				    IXGBE_EEC_SIZE_SHIFT);
		eeprom->word_size = 1 << (eeprom_size +
					  IXGBE_EEPROM_WORD_SIZE_SHIFT);

		hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
		       eeprom->type, eeprom->word_size);
	}

	return 0;
}

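/*
 * Illustrative sketch (not part of the driver): the word count above is
 * derived purely from the EEC.SIZE field, so reading it back after
 * init_params() is enough to bound offsets handed to the read/write ops.
 * The helper name is hypothetical.
 *
 *	static bool example_offset_in_range(struct ixgbe_hw *hw, u16 offset)
 *	{
 *		hw->eeprom.ops.init_params(hw);		// fills word_size
 *		return offset < hw->eeprom.word_size;	// 1 << (SIZE + shift)
 *	}
 */
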
/**
 * ixgbe_read_eerd_X540 - Read EEPROM word using EERD
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: word read from the EEPROM
 **/
static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	s32 status;

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
		status = ixgbe_read_eerd_generic(hw, offset, data);
	else
		status = IXGBE_ERR_SWFW_SYNC;

	ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
	return status;
}

/**
 * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to write
 * @data: word to write to the EEPROM
 *
 * Write a 16 bit word to the EEPROM using the EEWR register.
 **/
static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	u32 eewr;
	s32 status;

	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
	       (data << IXGBE_EEPROM_RW_REG_DATA) |
	       IXGBE_EEPROM_RW_REG_START;

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
		if (status != 0) {
			hw_dbg(hw, "Eeprom write EEWR timed out\n");
			goto out;
		}

		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);

		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
		if (status != 0) {
			hw_dbg(hw, "Eeprom write EEWR timed out\n");
			goto out;
		}
	} else {
		status = IXGBE_ERR_SWFW_SYNC;
	}

out:
	ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
	return status;
}

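/*
 * Illustrative sketch (not part of the driver): a typical shadow-RAM update
 * pairs the EEWR write above with a checksum refresh so that the later FLUP
 * copy to flash sees a consistent image.  The helper name is hypothetical.
 *
 *	static s32 example_patch_word(struct ixgbe_hw *hw, u16 offset, u16 val)
 *	{
 *		s32 err;
 *
 *		err = hw->eeprom.ops.write(hw, offset, val);
 *		if (err)
 *			return err;
 *		// recompute the checksum word and trigger the flash update
 *		return hw->eeprom.ops.update_checksum(hw);
 *	}
 */
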
/**
 * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 **/
static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
	u16 i;
	u16 j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;

	/* Include 0x0-0x3F in the checksum */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &word) != 0) {
			hw_dbg(hw, "EEPROM read failed\n");
			break;
		}
		checksum += word;
	}

	/*
	 * Include all data from pointers 0x3, 0x6-0xE.  This excludes the
	 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
	 */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
		if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
			continue;

		if (hw->eeprom.ops.read(hw, i, &pointer) != 0) {
			hw_dbg(hw, "EEPROM read failed\n");
			break;
		}

		/* Skip pointer section if the pointer is invalid. */
		if (pointer == 0xFFFF || pointer == 0 ||
		    pointer >= hw->eeprom.word_size)
			continue;

		if (hw->eeprom.ops.read(hw, pointer, &length) != 0) {
			hw_dbg(hw, "EEPROM read failed\n");
			break;
		}

		/* Skip pointer section if length is invalid. */
		if (length == 0xFFFF || length == 0 ||
		    (pointer + length) >= hw->eeprom.word_size)
			continue;

		for (j = pointer + 1; j <= pointer + length; j++) {
			if (hw->eeprom.ops.read(hw, j, &word) != 0) {
				hw_dbg(hw, "EEPROM read failed\n");
				break;
			}
			checksum += word;
		}
	}

	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	return checksum;
}

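/*
 * Illustrative sketch (not part of the driver): the value computed above is
 * defined so that the covered words plus the checksum word stored at offset
 * IXGBE_EEPROM_CHECKSUM sum to IXGBE_EEPROM_SUM.  Callers normally go through
 * the validate_checksum op, but the check reduces to the comparison below.
 * The helper name is hypothetical.
 *
 *	static s32 example_checksum_ok(struct ixgbe_hw *hw)
 *	{
 *		u16 stored, calc;
 *
 *		if (hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &stored))
 *			return IXGBE_ERR_EEPROM;
 *		calc = hw->eeprom.ops.calc_checksum(hw);
 *		return (stored == calc) ? 0 : IXGBE_ERR_EEPROM_CHECKSUM;
 *	}
 */
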
/**
 * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
 * @hw: pointer to hardware structure
 *
 * After writing EEPROM words to shadow RAM using the EEWR register, software
 * calculates the checksum, writes it to the EEPROM and instructs the hardware
 * to update the flash.
 **/
static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
	s32 status;

	status = ixgbe_update_eeprom_checksum_generic(hw);

	/* Only copy shadow RAM to flash if the checksum update succeeded */
	if (status == 0)
		status = ixgbe_update_flash_X540(hw);

	return status;
}

/**
 * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
 * @hw: pointer to hardware structure
 *
 * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
 * EEPROM from shadow RAM to the flash device.
 **/
static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
{
	u32 flup;
	s32 status = IXGBE_ERR_EEPROM;

	status = ixgbe_poll_flash_update_done_X540(hw);
	if (status == IXGBE_ERR_EEPROM) {
		hw_dbg(hw, "Flash update time out\n");
		goto out;
	}

	flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);

	status = ixgbe_poll_flash_update_done_X540(hw);
	if (status == 0)
		hw_dbg(hw, "Flash update complete\n");
	else
		hw_dbg(hw, "Flash update time out\n");

	if (hw->revision_id == 0) {
		flup = IXGBE_READ_REG(hw, IXGBE_EEC);

		if (flup & IXGBE_EEC_SEC1VAL) {
			flup |= IXGBE_EEC_FLUP;
			IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
		}

		status = ixgbe_poll_flash_update_done_X540(hw);
		if (status == 0)
			hw_dbg(hw, "Flash update complete\n");
		else
			hw_dbg(hw, "Flash update time out\n");
	}
out:
	return status;
}

/**
 * ixgbe_poll_flash_update_done_X540 - Poll flash update status
 * @hw: pointer to hardware structure
 *
 * Polls the FLUDONE (bit 26) of the EEC Register to determine when the
 * flash update is done.
 **/
static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
{
	u32 i;
	u32 reg;
	s32 status = IXGBE_ERR_EEPROM;

	for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
		reg = IXGBE_READ_REG(hw, IXGBE_EEC);
		if (reg & IXGBE_EEC_FLUDONE) {
			status = 0;
			break;
		}
		udelay(5);
	}
	return status;
}

/**
 * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore through the SW_FW_SYNC register for
 * the specified function (CSR, PHY0, PHY1, NVM, Flash)
 **/
static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 5;
	u32 hwmask = 0;
	u32 timeout = 200;
	u32 i;

	if (swmask == IXGBE_GSSR_EEP_SM)
		hwmask = IXGBE_GSSR_FLASH_SM;

	for (i = 0; i < timeout; i++) {
		/*
		 * SW NVM semaphore bit is used for access to all
		 * SW_FW_SYNC bits (not just NVM)
		 */
		if (ixgbe_get_swfw_sync_semaphore(hw))
			return IXGBE_ERR_SWFW_SYNC;

		swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
		if (!(swfw_sync & (fwmask | swmask | hwmask))) {
			swfw_sync |= swmask;
			IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
			ixgbe_release_swfw_sync_semaphore(hw);
			break;
		} else {
			/*
			 * Firmware currently using resource (fwmask),
			 * hardware currently using resource (hwmask),
			 * or other software thread currently using
			 * resource (swmask)
			 */
			ixgbe_release_swfw_sync_semaphore(hw);
			usleep_range(5000, 10000);
		}
	}

	/*
	 * If the resource is not released by the FW/HW the SW can assume that
	 * the FW/HW malfunctions.  In that case the SW should set the
	 * SW bit(s) of the requested resource(s) while ignoring the
	 * corresponding FW/HW bits in the SW_FW_SYNC register.
	 */
	if (i >= timeout) {
		swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
		if (swfw_sync & (fwmask | hwmask)) {
			if (ixgbe_get_swfw_sync_semaphore(hw))
				return IXGBE_ERR_SWFW_SYNC;

			swfw_sync |= swmask;
			IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
			ixgbe_release_swfw_sync_semaphore(hw);
		}
	}

	usleep_range(5000, 10000);
	return 0;
}

/**
 * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to release
 *
 * Releases the SWFW semaphore through the SW_FW_SYNC register
 * for the specified function (CSR, PHY0, PHY1, NVM, Flash)
 **/
static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;

	ixgbe_get_swfw_sync_semaphore(hw);

	swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
	swfw_sync &= ~swmask;
	IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);

	ixgbe_release_swfw_sync_semaphore(hw);
	usleep_range(5000, 10000);
}

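/*
 * Illustrative sketch (not part of the driver): any access that the firmware
 * or another function may also perform is bracketed by the two helpers
 * above, exactly as ixgbe_read_eerd_X540() does for the EEPROM.  The helper
 * name below is hypothetical.
 *
 *	static s32 example_locked_eeprom_read(struct ixgbe_hw *hw, u16 offset,
 *					      u16 *data)
 *	{
 *		s32 err;
 *
 *		if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
 *			return IXGBE_ERR_SWFW_SYNC;
 *		err = ixgbe_read_eerd_generic(hw, offset, data);
 *		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
 *		return err;
 *	}
 */
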
/**
 * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
 * @hw: pointer to hardware structure
 *
 * Sets the hardware semaphores so SW/FW can gain control of shared resources
 **/
static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = 0;
			break;
		}
		udelay(50);
	}

	/* Now get the semaphore between SW/FW through the REGSMP bit */
	if (status == 0) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
			if (!(swsm & IXGBE_SWFW_REGSMP))
				break;

			udelay(50);
		}
	} else {
		hw_dbg(hw, "Software semaphore SMBI between device drivers "
		       "not granted.\n");
	}

	return status;
}

/**
 * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
 * @hw: pointer to hardware structure
 *
 * This function clears hardware semaphore bits.
 **/
static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
{
	u32 swsm;

	/* Release both semaphores by writing 0 to the bits REGSMP and SMBI */

	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
	swsm &= ~IXGBE_SWSM_SMBI;
	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);

	swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
	swsm &= ~IXGBE_SWFW_REGSMP;
	IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);

	IXGBE_WRITE_FLUSH(hw);
}

static struct ixgbe_mac_operations mac_ops_X540 = {
	.init_hw = &ixgbe_init_hw_generic,
	.reset_hw = &ixgbe_reset_hw_X540,
	.start_hw = &ixgbe_start_hw_X540,
	.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
	.get_media_type = &ixgbe_get_media_type_X540,
	.get_supported_physical_layer =
				&ixgbe_get_supported_physical_layer_X540,
	.enable_rx_dma = &ixgbe_enable_rx_dma_generic,
	.get_mac_addr = &ixgbe_get_mac_addr_generic,
	.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
	.get_device_caps = NULL,
	.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
	.stop_adapter = &ixgbe_stop_adapter_generic,
	.get_bus_info = &ixgbe_get_bus_info_generic,
	.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
	.read_analog_reg8 = NULL,
	.write_analog_reg8 = NULL,
	.setup_link = &ixgbe_setup_mac_link_X540,
	.check_link = &ixgbe_check_mac_link_generic,
	.get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
	.led_on = &ixgbe_led_on_generic,
	.led_off = &ixgbe_led_off_generic,
	.blink_led_start = &ixgbe_blink_led_start_generic,
	.blink_led_stop = &ixgbe_blink_led_stop_generic,
	.set_rar = &ixgbe_set_rar_generic,
	.clear_rar = &ixgbe_clear_rar_generic,
	.set_vmdq = &ixgbe_set_vmdq_generic,
	.clear_vmdq = &ixgbe_clear_vmdq_generic,
	.init_rx_addrs = &ixgbe_init_rx_addrs_generic,
	.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
	.enable_mc = &ixgbe_enable_mc_generic,
	.disable_mc = &ixgbe_disable_mc_generic,
	.clear_vfta = &ixgbe_clear_vfta_generic,
	.set_vfta = &ixgbe_set_vfta_generic,
	.fc_enable = &ixgbe_fc_enable_generic,
	.init_uta_tables = &ixgbe_init_uta_tables_generic,
	.setup_sfp = NULL,
	.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,
	.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
	.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
	.release_swfw_sync = &ixgbe_release_swfw_sync_X540,
};

static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
	.init_params = &ixgbe_init_eeprom_params_X540,
	.read = &ixgbe_read_eerd_X540,
	.write = &ixgbe_write_eewr_X540,
	.calc_checksum = &ixgbe_calc_eeprom_checksum_X540,
	.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
	.update_checksum = &ixgbe_update_eeprom_checksum_X540,
};

static struct ixgbe_phy_operations phy_ops_X540 = {
	.identify = &ixgbe_identify_phy_generic,
	.identify_sfp = &ixgbe_identify_sfp_module_generic,
	.init = NULL,
	.reset = NULL,
	.read_reg = &ixgbe_read_phy_reg_generic,
	.write_reg = &ixgbe_write_phy_reg_generic,
	.setup_link = &ixgbe_setup_phy_link_generic,
	.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
	.read_i2c_byte = &ixgbe_read_i2c_byte_generic,
	.write_i2c_byte = &ixgbe_write_i2c_byte_generic,
	.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
	.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
	.check_overtemp = &ixgbe_tn_check_overtemp,
};

struct ixgbe_info ixgbe_X540_info = {
	.mac = ixgbe_mac_X540,
	.get_invariants = &ixgbe_get_invariants_X540,
	.mac_ops = &mac_ops_X540,
	.eeprom_ops = &eeprom_ops_X540,
	.phy_ops = &phy_ops_X540,
	.mbx_ops = &mbx_ops_generic,
};
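
/*
 * Illustrative sketch (not part of this file): ixgbe_X540_info is consumed by
 * the PCI probe path in ixgbe_main.c, which copies the per-MAC tables into
 * the live ixgbe_hw before calling get_invariants.  The lines below are a
 * simplified rendering of that wiring, not the actual probe code.
 *
 *	const struct ixgbe_info *ii = &ixgbe_X540_info;
 *
 *	hw->mac.type = ii->mac;
 *	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
 *	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
 *	memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
 *	memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
 *	ii->get_invariants(hw);
 */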