// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */

#include <linux/delay.h>

#include "igc_hw.h"
#include "igc_i225.h"
#include "igc_mac.h"
#include "igc_base.h"
#include "igc.h"
12/**
13 * igc_set_pcie_completion_timeout - set pci-e completion timeout
14 * @hw: pointer to the HW structure
15 */
16static s32 igc_set_pcie_completion_timeout(struct igc_hw *hw)
17{
18 u32 gcr = rd32(IGC_GCR);
19 u16 pcie_devctl2;
20 s32 ret_val = 0;
21
22 /* only take action if timeout value is defaulted to 0 */
23 if (gcr & IGC_GCR_CMPL_TMOUT_MASK)
24 goto out;
25
26 /* if capabilities version is type 1 we can write the
27 * timeout of 10ms to 200ms through the GCR register
28 */
29 if (!(gcr & IGC_GCR_CAP_VER2)) {
30 gcr |= IGC_GCR_CMPL_TMOUT_10ms;
31 goto out;
32 }
33
34 /* for version 2 capabilities we need to write the config space
35 * directly in order to set the completion timeout value for
36 * 16ms to 55ms
37 */
38 ret_val = igc_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
39 &pcie_devctl2);
40 if (ret_val)
41 goto out;
42
43 pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
44
45 ret_val = igc_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
46 &pcie_devctl2);
47out:
48 /* disable completion timeout resend */
49 gcr &= ~IGC_GCR_CMPL_TMOUT_RESEND;
50
51 wr32(IGC_GCR, gcr);
52
53 return ret_val;
54}
55
56/**
57 * igc_reset_hw_base - Reset hardware
58 * @hw: pointer to the HW structure
59 *
60 * This resets the hardware into a known state. This is a
61 * function pointer entry point called by the api module.
62 */
63static s32 igc_reset_hw_base(struct igc_hw *hw)
64{
65 s32 ret_val;
66 u32 ctrl;
67
68 /* Prevent the PCI-E bus from sticking if there is no TLP connection
69 * on the last TLP read/write transaction when MAC is reset.
70 */
71 ret_val = igc_disable_pcie_master(hw);
72 if (ret_val)
73 hw_dbg("PCI-E Master disable polling has failed.\n");
74
75 /* set the completion timeout for interface */
76 ret_val = igc_set_pcie_completion_timeout(hw);
77 if (ret_val)
78 hw_dbg("PCI-E Set completion timeout has failed.\n");
79
80 hw_dbg("Masking off all interrupts\n");
81 wr32(IGC_IMC, 0xffffffff);
82
83 wr32(IGC_RCTL, 0);
84 wr32(IGC_TCTL, IGC_TCTL_PSP);
85 wrfl();
86
87 usleep_range(10000, 20000);
88
89 ctrl = rd32(IGC_CTRL);
90
91 hw_dbg("Issuing a global reset to MAC\n");
92 wr32(IGC_CTRL, ctrl | IGC_CTRL_RST);
93
94 ret_val = igc_get_auto_rd_done(hw);
95 if (ret_val) {
96 /* When auto config read does not complete, do not
97 * return with an error. This can happen in situations
98 * where there is no eeprom and prevents getting link.
99 */
100 hw_dbg("Auto Read Done did not complete\n");
101 }
102
103 /* Clear any pending interrupt events. */
104 wr32(IGC_IMC, 0xffffffff);
105 rd32(IGC_ICR);
106
107 return ret_val;
108}
109
110/**
111 * igc_init_mac_params_base - Init MAC func ptrs.
112 * @hw: pointer to the HW structure
113 */
114static s32 igc_init_mac_params_base(struct igc_hw *hw)
115{
116 struct igc_mac_info *mac = &hw->mac;
117
118 /* Set mta register count */
119 mac->mta_reg_count = 128;
120 mac->rar_entry_count = IGC_RAR_ENTRIES;
121
122 /* reset */
123 mac->ops.reset_hw = igc_reset_hw_base;
124
125 mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225;
126 mac->ops.release_swfw_sync = igc_release_swfw_sync_i225;
127
128 return 0;
129}
130
131static s32 igc_get_invariants_base(struct igc_hw *hw)
132{
133 u32 link_mode = 0;
134 u32 ctrl_ext = 0;
135 s32 ret_val = 0;
136
137 ctrl_ext = rd32(IGC_CTRL_EXT);
138 link_mode = ctrl_ext & IGC_CTRL_EXT_LINK_MODE_MASK;
139
140 /* mac initialization and operations */
141 ret_val = igc_init_mac_params_base(hw);
142 if (ret_val)
143 goto out;
144
145out:
146 return ret_val;
147}
148
149/**
150 * igc_init_hw_base - Initialize hardware
151 * @hw: pointer to the HW structure
152 *
153 * This inits the hardware readying it for operation.
154 */
155static s32 igc_init_hw_base(struct igc_hw *hw)
156{
157 struct igc_mac_info *mac = &hw->mac;
158 u16 i, rar_count = mac->rar_entry_count;
159 s32 ret_val = 0;
160
161 /* Setup the receive address */
162 igc_init_rx_addrs(hw, rar_count);
163
164 /* Zero out the Multicast HASH table */
165 hw_dbg("Zeroing the MTA\n");
166 for (i = 0; i < mac->mta_reg_count; i++)
167 array_wr32(IGC_MTA, i, 0);
168
169 /* Zero out the Unicast HASH table */
170 hw_dbg("Zeroing the UTA\n");
171 for (i = 0; i < mac->uta_reg_count; i++)
172 array_wr32(IGC_UTA, i, 0);
173
174 /* Setup link and flow control */
175 ret_val = igc_setup_link(hw);
176
177 /* Clear all of the statistics registers (clear on read). It is
178 * important that we do this after we have tried to establish link
179 * because the symbol error count will increment wildly if there
180 * is no link.
181 */
182 igc_clear_hw_cntrs_base(hw);
183
184 return ret_val;
185}
187/**
188 * igc_rx_fifo_flush_base - Clean rx fifo after Rx enable
189 * @hw: pointer to the HW structure
190 *
191 * After Rx enable, if manageability is enabled then there is likely some
192 * bad data at the start of the fifo and possibly in the DMA fifo. This
193 * function clears the fifos and flushes any packets that came in as rx was
194 * being enabled.
195 */
196void igc_rx_fifo_flush_base(struct igc_hw *hw)
197{
198 u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
199 int i, ms_wait;
200
201 /* disable IPv6 options as per hardware errata */
202 rfctl = rd32(IGC_RFCTL);
203 rfctl |= IGC_RFCTL_IPV6_EX_DIS;
204 wr32(IGC_RFCTL, rfctl);
205
206 if (!(rd32(IGC_MANC) & IGC_MANC_RCV_TCO_EN))
207 return;
208
209 /* Disable all Rx queues */
210 for (i = 0; i < 4; i++) {
211 rxdctl[i] = rd32(IGC_RXDCTL(i));
212 wr32(IGC_RXDCTL(i),
213 rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE);
214 }
215 /* Poll all queues to verify they have shut down */
216 for (ms_wait = 0; ms_wait < 10; ms_wait++) {
217 usleep_range(1000, 2000);
218 rx_enabled = 0;
219 for (i = 0; i < 4; i++)
220 rx_enabled |= rd32(IGC_RXDCTL(i));
221 if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE))
222 break;
223 }
224
225 if (ms_wait == 10)
226 pr_debug("Queue disable timed out after 10ms\n");
227
228 /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
229 * incoming packets are rejected. Set enable and wait 2ms so that
230 * any packet that was coming in as RCTL.EN was set is flushed
231 */
232 wr32(IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF);
233
234 rlpml = rd32(IGC_RLPML);
235 wr32(IGC_RLPML, 0);
236
237 rctl = rd32(IGC_RCTL);
238 temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP);
239 temp_rctl |= IGC_RCTL_LPE;
240
241 wr32(IGC_RCTL, temp_rctl);
242 wr32(IGC_RCTL, temp_rctl | IGC_RCTL_EN);
243 wrfl();
244 usleep_range(2000, 3000);
245
246 /* Enable Rx queues that were previously enabled and restore our
247 * previous state
248 */
249 for (i = 0; i < 4; i++)
250 wr32(IGC_RXDCTL(i), rxdctl[i]);
251 wr32(IGC_RCTL, rctl);
252 wrfl();
253
254 wr32(IGC_RLPML, rlpml);
255 wr32(IGC_RFCTL, rfctl);
256
257 /* Flush receive errors generated by workaround */
258 rd32(IGC_ROC);
259 rd32(IGC_RNBC);
260 rd32(IGC_MPC);
261}
263static struct igc_mac_operations igc_mac_ops_base = {
264 .init_hw = igc_init_hw_base,
265};
266
267const struct igc_info igc_base_info = {
268 .get_invariants = igc_get_invariants_base,
269 .mac_ops = &igc_mac_ops_base,
270};