/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2010 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include <linux/delay.h>
#include <linux/pci.h>

#define MASK(n)			((1ULL<<(n))-1)
#define MN_WIN(addr)		(((addr & 0x1fc0000) >> 1) | \
	((addr >> 25) & 0x3ff))
#define OCM_WIN(addr)		(((addr & 0x1ff0000) >> 1) | \
	((addr >> 25) & 0x3ff))
#define MS_WIN(addr)		(addr & 0x0ffc0000)
#define QLA82XX_PCI_MN_2M	(0)
#define QLA82XX_PCI_MS_2M	(0x80000)
#define QLA82XX_PCI_OCM0_2M	(0xc0000)
#define VALID_OCM_ADDR(addr)	(((addr) & 0x3f800) != 0x3f800)
#define GET_MEM_OFFS_2M(addr)	(addr & MASK(18))
#define BLOCK_PROTECT_BITS	0x0F

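/*
 * MN_WIN()/OCM_WIN()/MS_WIN() compute the window-register value that
 * qla82xx_pci_set_window() programs to map a card-side address into the
 * 2M PCI BAR: MN_WIN for the DDR (MN) side, OCM_WIN for on-chip memory
 * and MS_WIN for the QDR (MS) side.  GET_MEM_OFFS_2M() extracts the
 * offset of an address within the selected window.
 */
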
/* CRB window related */
#define CRB_BLK(off)	((off >> 20) & 0x3f)
#define CRB_SUBBLK(off)	((off >> 16) & 0xf)
#define CRB_WINDOW_2M	(0x130060)
#define QLA82XX_PCI_CAMQM_2M_END	(0x04800800UL)
#define CRB_HI(off)	((qla82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
	((off) & 0xf0000))
#define QLA82XX_PCI_CAMQM_2M_BASE	(0x000ff800UL)
#define CRB_INDIRECT_2M	(0x1e0000UL)

#define MAX_CRB_XFORM 60
static unsigned long crb_addr_xform[MAX_CRB_XFORM];
int qla82xx_crb_table_initialized;

#define qla82xx_crb_addr_transform(name) \
	(crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
	QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)

static void qla82xx_crb_addr_transform_setup(void)
{
	qla82xx_crb_addr_transform(XDMA);
	qla82xx_crb_addr_transform(TIMR);
	qla82xx_crb_addr_transform(SRE);
	qla82xx_crb_addr_transform(SQN3);
	qla82xx_crb_addr_transform(SQN2);
	qla82xx_crb_addr_transform(SQN1);
	qla82xx_crb_addr_transform(SQN0);
	qla82xx_crb_addr_transform(SQS3);
	qla82xx_crb_addr_transform(SQS2);
	qla82xx_crb_addr_transform(SQS1);
	qla82xx_crb_addr_transform(SQS0);
	qla82xx_crb_addr_transform(RPMX7);
	qla82xx_crb_addr_transform(RPMX6);
	qla82xx_crb_addr_transform(RPMX5);
	qla82xx_crb_addr_transform(RPMX4);
	qla82xx_crb_addr_transform(RPMX3);
	qla82xx_crb_addr_transform(RPMX2);
	qla82xx_crb_addr_transform(RPMX1);
	qla82xx_crb_addr_transform(RPMX0);
	qla82xx_crb_addr_transform(ROMUSB);
	qla82xx_crb_addr_transform(SN);
	qla82xx_crb_addr_transform(QMN);
	qla82xx_crb_addr_transform(QMS);
	qla82xx_crb_addr_transform(PGNI);
	qla82xx_crb_addr_transform(PGND);
	qla82xx_crb_addr_transform(PGN3);
	qla82xx_crb_addr_transform(PGN2);
	qla82xx_crb_addr_transform(PGN1);
	qla82xx_crb_addr_transform(PGN0);
	qla82xx_crb_addr_transform(PGSI);
	qla82xx_crb_addr_transform(PGSD);
	qla82xx_crb_addr_transform(PGS3);
	qla82xx_crb_addr_transform(PGS2);
	qla82xx_crb_addr_transform(PGS1);
	qla82xx_crb_addr_transform(PGS0);
	qla82xx_crb_addr_transform(PS);
	qla82xx_crb_addr_transform(PH);
	qla82xx_crb_addr_transform(NIU);
	qla82xx_crb_addr_transform(I2Q);
	qla82xx_crb_addr_transform(EG);
	qla82xx_crb_addr_transform(MN);
	qla82xx_crb_addr_transform(MS);
	qla82xx_crb_addr_transform(CAS2);
	qla82xx_crb_addr_transform(CAS1);
	qla82xx_crb_addr_transform(CAS0);
	qla82xx_crb_addr_transform(CAM);
	qla82xx_crb_addr_transform(C2C1);
	qla82xx_crb_addr_transform(C2C0);
	qla82xx_crb_addr_transform(SMB);
	qla82xx_crb_addr_transform(OCM0);
	/*
	 * Used only in P3; define it for P2 as well.
	 */
	qla82xx_crb_addr_transform(I2C0);

	qla82xx_crb_table_initialized = 1;
}

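/*
 * Mapping of the 128M CRB address space onto the 2M register BAR.  Each
 * of the 64 top-level entries covers one 1M CRB block (CRB_BLK()) and
 * holds 16 sub-block descriptors (CRB_SUBBLK()) of the form
 * {valid, start_128M, end_128M, start_2M}, which
 * qla82xx_pci_get_crb_addr_2M() uses for direct (windowless) translation.
 */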
struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
	{{{0, 0, 0, 0} } },
	{{{1, 0x0100000, 0x0102000, 0x120000},
	{1, 0x0110000, 0x0120000, 0x130000},
	{1, 0x0120000, 0x0122000, 0x124000},
	{1, 0x0130000, 0x0132000, 0x126000},
	{1, 0x0140000, 0x0142000, 0x128000},
	{1, 0x0150000, 0x0152000, 0x12a000},
	{1, 0x0160000, 0x0170000, 0x110000},
	{1, 0x0170000, 0x0172000, 0x12e000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{1, 0x01e0000, 0x01e0800, 0x122000},
	{0, 0x0000000, 0x0000000, 0x000000} } },
	{{{1, 0x0200000, 0x0210000, 0x180000} } },
	{{{0, 0, 0, 0} } },
	{{{1, 0x0400000, 0x0401000, 0x169000} } },
	{{{1, 0x0500000, 0x0510000, 0x140000} } },
	{{{1, 0x0600000, 0x0610000, 0x1c0000} } },
	{{{1, 0x0700000, 0x0704000, 0x1b8000} } },
	{{{1, 0x0800000, 0x0802000, 0x170000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{1, 0x08f0000, 0x08f2000, 0x172000} } },
	{{{1, 0x0900000, 0x0902000, 0x174000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{1, 0x09f0000, 0x09f2000, 0x176000} } },
	{{{0, 0x0a00000, 0x0a02000, 0x178000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{1, 0x0af0000, 0x0af2000, 0x17a000} } },
	{{{0, 0x0b00000, 0x0b02000, 0x17c000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
	{{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },
	{{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },
	{{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },
	{{{1, 0x0f00000, 0x0f01000, 0x164000} } },
	{{{0, 0x1000000, 0x1004000, 0x1a8000} } },
	{{{1, 0x1100000, 0x1101000, 0x160000} } },
	{{{1, 0x1200000, 0x1201000, 0x161000} } },
	{{{1, 0x1300000, 0x1301000, 0x162000} } },
	{{{1, 0x1400000, 0x1401000, 0x163000} } },
	{{{1, 0x1500000, 0x1501000, 0x165000} } },
	{{{1, 0x1600000, 0x1601000, 0x166000} } },
	{{{0, 0, 0, 0} } },
	{{{0, 0, 0, 0} } },
	{{{0, 0, 0, 0} } },
	{{{0, 0, 0, 0} } },
	{{{0, 0, 0, 0} } },
	{{{0, 0, 0, 0} } },
	{{{1, 0x1d00000, 0x1d10000, 0x190000} } },
	{{{1, 0x1e00000, 0x1e01000, 0x16a000} } },
	{{{1, 0x1f00000, 0x1f10000, 0x150000} } },
	{{{0} } },
	{{{1, 0x2100000, 0x2102000, 0x120000},
	{1, 0x2110000, 0x2120000, 0x130000},
	{1, 0x2120000, 0x2122000, 0x124000},
	{1, 0x2130000, 0x2132000, 0x126000},
	{1, 0x2140000, 0x2142000, 0x128000},
	{1, 0x2150000, 0x2152000, 0x12a000},
	{1, 0x2160000, 0x2170000, 0x110000},
	{1, 0x2170000, 0x2172000, 0x12e000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000} } },
	{{{1, 0x2200000, 0x2204000, 0x1b0000} } },
	{{{0} } },
	{{{0} } },
	{{{0} } },
	{{{0} } },
	{{{0} } },
	{{{1, 0x2800000, 0x2804000, 0x1a4000} } },
	{{{1, 0x2900000, 0x2901000, 0x16b000} } },
	{{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },
	{{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },
	{{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },
	{{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },
	{{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },
	{{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },
	{{{1, 0x3000000, 0x3000400, 0x1adc00} } },
	{{{0, 0x3100000, 0x3104000, 0x1a8000} } },
	{{{1, 0x3200000, 0x3204000, 0x1d4000} } },
	{{{1, 0x3300000, 0x3304000, 0x1a0000} } },
	{{{0} } },
	{{{1, 0x3500000, 0x3500400, 0x1ac000} } },
	{{{1, 0x3600000, 0x3600400, 0x1ae000} } },
	{{{1, 0x3700000, 0x3700400, 0x1ae400} } },
	{{{1, 0x3800000, 0x3804000, 0x1d0000} } },
	{{{1, 0x3900000, 0x3904000, 0x1b4000} } },
	{{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },
	{{{0} } },
	{{{0} } },
	{{{1, 0x3d00000, 0x3d04000, 0x1dc000} } },
	{{{1, 0x3e00000, 0x3e01000, 0x167000} } },
	{{{1, 0x3f00000, 0x3f01000, 0x168000} } }
};

/*
 * top 12 bits of crb internal address (hub, agent)
 */
unsigned qla82xx_crb_hub_agt[64] = {
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
	QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
	QLA82XX_HW_CRB_HUB_AGT_ADR_MS,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SRE,
	QLA82XX_HW_CRB_HUB_AGT_ADR_NIU,
	QLA82XX_HW_CRB_HUB_AGT_ADR_QMN,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
	QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
	QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4,
	QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGND,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SN,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_EG,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
	QLA82XX_HW_CRB_HUB_AGT_ADR_CAM,
	0,
	0,
	0,
	0,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7,
	QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
	QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9,
	QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SMB,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC,
	0,
};

/* Device states */
char *qdev_state[] = {
	"Unknown",
	"Cold",
	"Initializing",
	"Ready",
	"Need Reset",
	"Need Quiescent",
	"Failed",
	"Quiescent",
};

/*
 * In: 'off' is offset from CRB space in 128M pci map
 * Out: 'off' is 2M pci map addr
 * side effect: lock crb window
 */
static void
qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
{
	u32 win_read;

	ha->crb_win = CRB_HI(*off);
	writel(ha->crb_win,
		(void *)(CRB_WINDOW_2M + ha->nx_pcibase));

	/* Read back value to make sure write has gone through before trying
	 * to use it.
	 */
	win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
	if (win_read != ha->crb_win) {
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "%s: Written crbwin (0x%x) != Read crbwin (0x%x), "
		    "off=0x%lx\n", __func__, ha->crb_win, win_read, *off));
	}
	*off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
}

static inline unsigned long
qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
{
	/* See if we are currently pointing to the region we want to use next */
	if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
		/* No need to change window. PCIX and PCIe regs are
		 * in both windows.
		 */
		return off;
	}

	if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) {
		/* We are in first CRB window */
		if (ha->curr_window != 0)
			WARN_ON(1);
		return off;
	}

	if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) {
		/* We are in second CRB window */
		off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST;

		if (ha->curr_window != 1)
			return off;

		/* We are in the QM or direct access
		 * register region - do nothing
		 */
		if ((off >= QLA82XX_PCI_DIRECT_CRB) &&
		    (off < QLA82XX_PCI_CAMQM_MAX))
			return off;
	}
	/* strange address given */
	qla_printk(KERN_WARNING, ha,
	    "%s: Warning: unm_nic_pci_set_crbwindow called with"
	    " an unknown address(%llx)\n", QLA2XXX_DRIVER_NAME, off);
	return off;
}

static int
qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off)
{
	struct crb_128M_2M_sub_block_map *m;

	if (*off >= QLA82XX_CRB_MAX)
		return -1;

	if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
		*off = (*off - QLA82XX_PCI_CAMQM) +
			QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
		return 0;
	}

	if (*off < QLA82XX_PCI_CRBSPACE)
		return -1;

	*off -= QLA82XX_PCI_CRBSPACE;

	/* Try direct map */
	m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];

	if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
		*off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
		return 0;
	}
	/* Not in direct map, use crb window */
	return 1;
}

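/*
 * The CRB window register is a shared resource, so moving the window is
 * serialized with hardware semaphore 7 (PCIE_SEM7_LOCK/UNLOCK).  The
 * lock routine below spins until the semaphore read returns 1 and then
 * records the owner in QLA82XX_CRB_WIN_LOCK_ID.
 */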
#define CRB_WIN_LOCK_TIMEOUT 100000000
static int qla82xx_crb_win_lock(struct qla_hw_data *ha)
{
	int done = 0, timeout = 0;

	while (!done) {
		/* acquire semaphore7 from PCI HW block */
		done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
		if (done == 1)
			break;
		if (timeout >= CRB_WIN_LOCK_TIMEOUT)
			return -1;
		timeout++;
	}
	qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum);
	return 0;
}

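/*
 * Indirect CRB register accessors: qla82xx_pci_get_crb_addr_2M() first
 * tries a direct mapping into the 2M BAR; when that is not possible
 * (rv == 1) the access goes through the sliding CRB window under
 * ha->hw_lock and the SEM7 hardware semaphore.
 */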
int
qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data)
{
	unsigned long flags = 0;
	int rv;

	rv = qla82xx_pci_get_crb_addr_2M(ha, &off);

	BUG_ON(rv == -1);

	if (rv == 1) {
		write_lock_irqsave(&ha->hw_lock, flags);
		qla82xx_crb_win_lock(ha);
		qla82xx_pci_set_crbwindow_2M(ha, &off);
	}

	writel(data, (void __iomem *)off);

	if (rv == 1) {
		qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
		write_unlock_irqrestore(&ha->hw_lock, flags);
	}
	return 0;
}

int
qla82xx_rd_32(struct qla_hw_data *ha, ulong off)
{
	unsigned long flags = 0;
	int rv;
	u32 data;

	rv = qla82xx_pci_get_crb_addr_2M(ha, &off);

	BUG_ON(rv == -1);

	if (rv == 1) {
		write_lock_irqsave(&ha->hw_lock, flags);
		qla82xx_crb_win_lock(ha);
		qla82xx_pci_set_crbwindow_2M(ha, &off);
	}
	data = RD_REG_DWORD((void __iomem *)off);

	if (rv == 1) {
		qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
		write_unlock_irqrestore(&ha->hw_lock, flags);
	}
	return data;
}

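/*
 * IDC (inter-driver communication) lock, backed by hardware semaphore 5
 * (PCIE_SEM5_LOCK/UNLOCK).  The acquire loop yields the CPU via
 * schedule() when not in interrupt context.
 */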
#define IDC_LOCK_TIMEOUT 100000000
int qla82xx_idc_lock(struct qla_hw_data *ha)
{
	int i;
	int done = 0, timeout = 0;

	while (!done) {
		/* acquire semaphore5 from PCI HW block */
		done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
		if (done == 1)
			break;
		if (timeout >= IDC_LOCK_TIMEOUT)
			return -1;

		timeout++;

		/* Yield CPU */
		if (!in_interrupt())
			schedule();
		else {
			for (i = 0; i < 20; i++)
				cpu_relax();
		}
	}

	return 0;
}

void qla82xx_idc_unlock(struct qla_hw_data *ha)
{
	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
}

/* PCI Windowing for DDR regions. */
#define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
	(((addr) <= (high)) && ((addr) >= (low)))
/*
 * check memory access boundary.
 * used by test agent. support ddr access only for now
 */
static unsigned long
qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
	unsigned long long addr, int size)
{
	if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
		QLA82XX_ADDR_DDR_NET_MAX) ||
		!QLA82XX_ADDR_IN_RANGE(addr + size - 1, QLA82XX_ADDR_DDR_NET,
		QLA82XX_ADDR_DDR_NET_MAX) ||
		((size != 1) && (size != 2) && (size != 4) && (size != 8)))
			return 0;
	else
		return 1;
}

int qla82xx_pci_set_window_warning_count;

static unsigned long
qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
{
	int window;
	u32 win_read;

	if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
		QLA82XX_ADDR_DDR_NET_MAX)) {
		/* DDR network side */
		window = MN_WIN(addr);
		ha->ddr_mn_window = window;
		qla82xx_wr_32(ha,
			ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
		win_read = qla82xx_rd_32(ha,
			ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
		if ((win_read << 17) != window) {
			qla_printk(KERN_WARNING, ha,
			    "%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n",
			    __func__, window, win_read);
		}
		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
	} else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
		QLA82XX_ADDR_OCM0_MAX)) {
		unsigned int temp1;
		if ((addr & 0x00ff800) == 0xff800) {
			qla_printk(KERN_WARNING, ha,
			    "%s: QM access not handled.\n", __func__);
			addr = -1UL;
		}
		window = OCM_WIN(addr);
		ha->ddr_mn_window = window;
		qla82xx_wr_32(ha,
			ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
		win_read = qla82xx_rd_32(ha,
			ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
		temp1 = ((window & 0x1FF) << 7) |
		    ((window & 0x0FFFE0000) >> 17);
		if (win_read != temp1) {
			qla_printk(KERN_WARNING, ha,
			    "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x)\n",
			    __func__, temp1, win_read);
		}
		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;

	} else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET,
		QLA82XX_P3_ADDR_QDR_NET_MAX)) {
		/* QDR network side */
		window = MS_WIN(addr);
		ha->qdr_sn_window = window;
		qla82xx_wr_32(ha,
			ha->ms_win_crb | QLA82XX_PCI_CRBSPACE, window);
		win_read = qla82xx_rd_32(ha,
			ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
		if (win_read != window) {
			qla_printk(KERN_WARNING, ha,
			    "%s: Written MSwin (0x%x) != Read MSwin (0x%x)\n",
			    __func__, window, win_read);
		}
		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
	} else {
		/*
		 * peg gdb frequently accesses memory that doesn't exist,
		 * this limits the chit chat so debugging isn't slowed down.
		 */
		if ((qla82xx_pci_set_window_warning_count++ < 8) ||
		    (qla82xx_pci_set_window_warning_count%64 == 0)) {
			qla_printk(KERN_WARNING, ha,
			    "%s: Warning:%s Unknown address range!\n", __func__,
			    QLA2XXX_DRIVER_NAME);
		}
		addr = -1UL;
	}
	return addr;
}

/* check if address is in the same window as the previous access */
static int qla82xx_pci_is_same_window(struct qla_hw_data *ha,
	unsigned long long addr)
{
	int window;
	unsigned long long qdr_max;

	qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;

	/* DDR network side */
	if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
		QLA82XX_ADDR_DDR_NET_MAX))
		BUG();
	else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
		QLA82XX_ADDR_OCM0_MAX))
		return 1;
	else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1,
		QLA82XX_ADDR_OCM1_MAX))
		return 1;
	else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
		/* QDR network side */
		window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
		if (ha->qdr_sn_window == window)
			return 1;
	}
	return 0;
}

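/*
 * Direct (windowed) access to card memory: qla82xx_pci_set_window()
 * selects the window for 'off', the matching BAR0 page(s) are mapped
 * with ioremap() and the transfer is done with readb/w/l/q.  Accesses
 * that straddle hardware windows or hit an unknown range are rejected.
 */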
static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
	u64 off, void *data, int size)
{
	unsigned long flags;
	void *addr = NULL;
	int ret = 0;
	u64 start;
	uint8_t *mem_ptr = NULL;
	unsigned long mem_base;
	unsigned long mem_page;

	write_lock_irqsave(&ha->hw_lock, flags);

	/*
	 * If attempting to access unknown address or straddle hw windows,
	 * do not access.
	 */
	start = qla82xx_pci_set_window(ha, off);
	if ((start == -1UL) ||
		(qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
		write_unlock_irqrestore(&ha->hw_lock, flags);
		qla_printk(KERN_ERR, ha,
			"%s out of bound pci memory access. "
			"offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off);
		return -1;
	}

	write_unlock_irqrestore(&ha->hw_lock, flags);
	mem_base = pci_resource_start(ha->pdev, 0);
	mem_page = start & PAGE_MASK;
	/* Map two pages whenever user tries to access addresses in two
	 * consecutive pages.
	 */
	if (mem_page != ((start + size - 1) & PAGE_MASK))
		mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
	else
		mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
	if (mem_ptr == 0UL) {
		*(u8 *)data = 0;
		return -1;
	}
	addr = mem_ptr;
	addr += start & (PAGE_SIZE - 1);
	write_lock_irqsave(&ha->hw_lock, flags);

	switch (size) {
	case 1:
		*(u8 *)data = readb(addr);
		break;
	case 2:
		*(u16 *)data = readw(addr);
		break;
	case 4:
		*(u32 *)data = readl(addr);
		break;
	case 8:
		*(u64 *)data = readq(addr);
		break;
	default:
		ret = -1;
		break;
	}
	write_unlock_irqrestore(&ha->hw_lock, flags);

	if (mem_ptr)
		iounmap(mem_ptr);
	return ret;
}

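/* Write counterpart of qla82xx_pci_mem_read_direct(): same window and
 * ioremap() handling, with writeb/w/l/q performing the actual store.
 */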
static int
qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
	u64 off, void *data, int size)
{
	unsigned long flags;
	void *addr = NULL;
	int ret = 0;
	u64 start;
	uint8_t *mem_ptr = NULL;
	unsigned long mem_base;
	unsigned long mem_page;

	write_lock_irqsave(&ha->hw_lock, flags);

	/*
	 * If attempting to access unknown address or straddle hw windows,
	 * do not access.
	 */
	start = qla82xx_pci_set_window(ha, off);
	if ((start == -1UL) ||
		(qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
		write_unlock_irqrestore(&ha->hw_lock, flags);
		qla_printk(KERN_ERR, ha,
			"%s out of bound pci memory access. "
			"offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off);
		return -1;
	}

	write_unlock_irqrestore(&ha->hw_lock, flags);
	mem_base = pci_resource_start(ha->pdev, 0);
	mem_page = start & PAGE_MASK;
	/* Map two pages whenever user tries to access addresses in two
	 * consecutive pages.
	 */
	if (mem_page != ((start + size - 1) & PAGE_MASK))
		mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
	else
		mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
	if (mem_ptr == 0UL)
		return -1;

	addr = mem_ptr;
	addr += start & (PAGE_SIZE - 1);
	write_lock_irqsave(&ha->hw_lock, flags);

	switch (size) {
	case 1:
		writeb(*(u8 *)data, addr);
		break;
	case 2:
		writew(*(u16 *)data, addr);
		break;
	case 4:
		writel(*(u32 *)data, addr);
		break;
	case 8:
		writeq(*(u64 *)data, addr);
		break;
	default:
		ret = -1;
		break;
	}
	write_unlock_irqrestore(&ha->hw_lock, flags);
	if (mem_ptr)
		iounmap(mem_ptr);
	return ret;
}

#define MTU_FUDGE_FACTOR 100
static unsigned long
qla82xx_decode_crb_addr(unsigned long addr)
{
	int i;
	unsigned long base_addr, offset, pci_base;

	if (!qla82xx_crb_table_initialized)
		qla82xx_crb_addr_transform_setup();

	pci_base = ADDR_ERROR;
	base_addr = addr & 0xfff00000;
	offset = addr & 0x000fffff;

	for (i = 0; i < MAX_CRB_XFORM; i++) {
		if (crb_addr_xform[i] == base_addr) {
			pci_base = i << 20;
			break;
		}
	}
	if (pci_base == ADDR_ERROR)
		return pci_base;
	return pci_base + offset;
}

static long rom_max_timeout = 100;
static long qla82xx_rom_lock_timeout = 100;

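/*
 * Flash (ROM) accesses go through the ROMUSB block and are serialized
 * with hardware semaphore 2; the lock owner is recorded in
 * QLA82XX_ROM_LOCK_ID.
 */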
static int
qla82xx_rom_lock(struct qla_hw_data *ha)
{
	int done = 0, timeout = 0;

	while (!done) {
		/* acquire semaphore2 from PCI HW block */
		done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
		if (done == 1)
			break;
		if (timeout >= qla82xx_rom_lock_timeout)
			return -1;
		timeout++;
	}
	qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
	return 0;
}

static int
qla82xx_wait_rom_busy(struct qla_hw_data *ha)
{
	long timeout = 0;
	long done = 0;

	while (done == 0) {
		done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
		done &= 4;
		timeout++;
		if (timeout >= rom_max_timeout) {
			DEBUG(qla_printk(KERN_INFO, ha,
				"%s: Timeout reached waiting for rom busy",
				QLA2XXX_DRIVER_NAME));
			return -1;
		}
	}
	return 0;
}

static int
qla82xx_wait_rom_done(struct qla_hw_data *ha)
{
	long timeout = 0;
	long done = 0;

	while (done == 0) {
		done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
		done &= 2;
		timeout++;
		if (timeout >= rom_max_timeout) {
			DEBUG(qla_printk(KERN_INFO, ha,
				"%s: Timeout reached waiting for rom done",
				QLA2XXX_DRIVER_NAME));
			return -1;
		}
	}
	return 0;
}

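/*
 * Issue one fast-read cycle: program the flash address, zero dummy
 * bytes, a 3-byte address count and instruction opcode 0xb (presumably
 * the serial-flash FAST_READ command), wait for the busy/done bits in
 * ROMUSB_GLB_STATUS, then pick up the 32-bit result from ROM_RDATA.
 */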
static int
qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
{
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
	qla82xx_wait_rom_busy(ha);
	if (qla82xx_wait_rom_done(ha)) {
		qla_printk(KERN_WARNING, ha,
			"%s: Error waiting for rom done\n",
			QLA2XXX_DRIVER_NAME);
		return -1;
	}
	/* Reset abyte_cnt and dummy_byte_cnt */
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
	udelay(10);
	cond_resched();
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
	*valp = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
	return 0;
}

static int
qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
{
	int ret, loops = 0;

	while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
		udelay(100);
		schedule();
		loops++;
	}
	if (loops >= 50000) {
		qla_printk(KERN_INFO, ha,
			"%s: qla82xx_rom_lock failed\n",
			QLA2XXX_DRIVER_NAME);
		return -1;
	}
	ret = qla82xx_do_rom_fast_read(ha, addr, valp);
	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
	return ret;
}

static int
qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
{
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
	qla82xx_wait_rom_busy(ha);
	if (qla82xx_wait_rom_done(ha)) {
		qla_printk(KERN_WARNING, ha,
		    "Error waiting for rom done\n");
		return -1;
	}
	*val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
	return 0;
}

static int
qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
{
	long timeout = 0;
	uint32_t done = 1;
	uint32_t val;
	int ret = 0;

	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
	while ((done != 0) && (ret == 0)) {
		ret = qla82xx_read_status_reg(ha, &val);
		done = val & 1;
		timeout++;
		udelay(10);
		cond_resched();
		if (timeout >= 50000) {
			qla_printk(KERN_WARNING, ha,
			    "Timeout reached waiting for write finish");
			return -1;
		}
	}
	return ret;
}

static int
qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
{
	uint32_t val;
	qla82xx_wait_rom_busy(ha);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN);
	qla82xx_wait_rom_busy(ha);
	if (qla82xx_wait_rom_done(ha))
		return -1;
	if (qla82xx_read_status_reg(ha, &val) != 0)
		return -1;
	if ((val & 2) != 2)
		return -1;
	return 0;
}

static int
qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
{
	if (qla82xx_flash_set_write_enable(ha))
		return -1;
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1);
	if (qla82xx_wait_rom_done(ha)) {
		qla_printk(KERN_WARNING, ha,
		    "Error waiting for rom done\n");
		return -1;
	}
	return qla82xx_flash_wait_write_finish(ha);
}

static int
qla82xx_write_disable_flash(struct qla_hw_data *ha)
{
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
	if (qla82xx_wait_rom_done(ha)) {
		qla_printk(KERN_WARNING, ha,
		    "Error waiting for rom done\n");
		return -1;
	}
	return 0;
}

static int
ql82xx_rom_lock_d(struct qla_hw_data *ha)
{
	int loops = 0;
	while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
		udelay(100);
		cond_resched();
		loops++;
	}
	if (loops >= 50000) {
		qla_printk(KERN_WARNING, ha, "ROM lock failed\n");
		return -1;
	}
	return 0;
}

static int
qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
	uint32_t data)
{
	int ret = 0;

	ret = ql82xx_rom_lock_d(ha);
	if (ret < 0) {
		qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
		return ret;
	}

	if (qla82xx_flash_set_write_enable(ha))
		goto done_write;

	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, flashaddr);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP);
	qla82xx_wait_rom_busy(ha);
	if (qla82xx_wait_rom_done(ha)) {
		qla_printk(KERN_WARNING, ha,
			"Error waiting for rom done\n");
		ret = -1;
		goto done_write;
	}

	ret = qla82xx_flash_wait_write_finish(ha);

done_write:
	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
	return ret;
}

/* This routine does CRB initialize sequence
 * to put the ISP into operational state
 */
static int
qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
{
	int addr, val;
	int i;
	struct crb_addr_pair *buf;
	unsigned long off;
	unsigned offset, n;
	struct qla_hw_data *ha = vha->hw;

	struct crb_addr_pair {
		long addr;
		long data;
	};

	/* Halt all the individual PEGs and other blocks of the ISP */
	qla82xx_rom_lock(ha);

	/* mask all niu interrupts */
	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
	/* disable xge rx/tx */
	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
	/* disable xg1 rx/tx */
	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);

	/* halt sre */
	val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
	qla82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1)));

	/* halt epg */
	qla82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1);

	/* halt timers */
	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0);
	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0);
	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);

	/* halt pegs */
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);

	/* big hammer */
	msleep(1000);
	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		/* don't reset CAM block on reset */
		qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
	else
		qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);

	/* reset ms */
	val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
	val |= (1 << 1);
	qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
	msleep(20);

	/* unreset ms */
	val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
	val &= ~(1 << 1);
	qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
	msleep(20);

	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));

	/* Read the signature value from the flash.
	 * Offset 0: Contains the signature (0xcafecafe)
	 * Offset 4: Offset and number of addr/value pairs
	 * present in the CRB initialize sequence
	 */
	if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
	    qla82xx_rom_fast_read(ha, 4, &n) != 0) {
		qla_printk(KERN_WARNING, ha,
		    "[ERROR] Reading crb_init area: n: %08x\n", n);
		return -1;
	}

	/* Offset in flash = lower 16 bits
	 * Number of entries = upper 16 bits
	 */
	offset = n & 0xffffU;
	n = (n >> 16) & 0xffffU;

	/* number of addr/value pairs should not exceed 1024 entries */
	if (n >= 1024) {
		qla_printk(KERN_WARNING, ha,
		    "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n",
		    QLA2XXX_DRIVER_NAME, __func__, n);
		return -1;
	}

	qla_printk(KERN_INFO, ha,
	    "%s: %d CRB init values found in ROM.\n", QLA2XXX_DRIVER_NAME, n);

	buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
	if (buf == NULL) {
		qla_printk(KERN_WARNING, ha,
		    "%s: [ERROR] Unable to malloc memory.\n",
		    QLA2XXX_DRIVER_NAME);
		return -1;
	}

	for (i = 0; i < n; i++) {
		if (qla82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
		    qla82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) {
			kfree(buf);
			return -1;
		}

		buf[i].addr = addr;
		buf[i].data = val;
	}

	for (i = 0; i < n; i++) {
		/* Translate internal CRB initialization
		 * address to PCI bus address
		 */
		off = qla82xx_decode_crb_addr((unsigned long)buf[i].addr) +
		    QLA82XX_PCI_CRBSPACE;
		/* Not all CRB addr/value pairs need to be written;
		 * some of them are skipped
		 */

		/* skipping cold reboot MAGIC */
		if (off == QLA82XX_CAM_RAM(0x1fc))
			continue;

		/* do not reset PCI */
		if (off == (ROMUSB_GLB + 0xbc))
			continue;

		/* skip core clock, so that firmware can increase the clock */
		if (off == (ROMUSB_GLB + 0xc8))
			continue;

		/* skip the function enable register */
		if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION))
			continue;

		if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2))
			continue;

		if ((off & 0x0ff00000) == QLA82XX_CRB_SMB)
			continue;

		if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET)
			continue;

		if (off == ADDR_ERROR) {
			qla_printk(KERN_WARNING, ha,
			    "%s: [ERROR] Unknown addr: 0x%08lx\n",
			    QLA2XXX_DRIVER_NAME, buf[i].addr);
			continue;
		}

		qla82xx_wr_32(ha, off, buf[i].data);

		/* ISP requires much bigger delay to settle down,
		 * else crb_window returns 0xffffffff
		 */
		if (off == QLA82XX_ROMUSB_GLB_SW_RESET)
			msleep(1000);

		/* ISP requires a millisecond delay between
		 * successive CRB register updates
		 */
		msleep(1);
	}

	kfree(buf);

	/* Resetting the data and instruction cache */
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);

	/* Clear all protocol processing engines */
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
	return 0;
}

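/*
 * Write to card memory through the MIU test agent.  The transfer works
 * on 16-byte units: the enclosing words are read back with
 * qla82xx_pci_mem_read_2M(), the new bytes are merged in, and the result
 * is pushed out via the MIU_TEST_AGT_* registers.  Offsets that fail the
 * DDR bound check fall back to qla82xx_pci_mem_write_direct().
 */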
static int
qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
	u64 off, void *data, int size)
{
	int i, j, ret = 0, loop, sz[2], off0;
	int scale, shift_amount, startword;
	uint32_t temp;
	uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};

	/*
	 * If not MN, go check for MS or invalid.
	 */
	if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
		mem_crb = QLA82XX_CRB_QDR_NET;
	else {
		mem_crb = QLA82XX_CRB_DDR_NET;
		if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
			return qla82xx_pci_mem_write_direct(ha,
			    off, data, size);
	}

	off0 = off & 0x7;
	sz[0] = (size < (8 - off0)) ? size : (8 - off0);
	sz[1] = size - sz[0];

	off8 = off & 0xfffffff0;
	loop = (((off & 0xf) + size - 1) >> 4) + 1;
	shift_amount = 4;
	scale = 2;
	startword = (off & 0xf)/8;

	for (i = 0; i < loop; i++) {
		if (qla82xx_pci_mem_read_2M(ha, off8 +
		    (i << shift_amount), &word[i * scale], 8))
			return -1;
	}

	switch (size) {
	case 1:
		tmpw = *((uint8_t *)data);
		break;
	case 2:
		tmpw = *((uint16_t *)data);
		break;
	case 4:
		tmpw = *((uint32_t *)data);
		break;
	case 8:
	default:
		tmpw = *((uint64_t *)data);
		break;
	}

	if (sz[0] == 8) {
		word[startword] = tmpw;
	} else {
		word[startword] &=
			~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
		word[startword] |= tmpw << (off0 * 8);
	}
	if (sz[1] != 0) {
		word[startword+1] &= ~(~0ULL << (sz[1] * 8));
		word[startword+1] |= tmpw >> (sz[0] * 8);
	}

	for (i = 0; i < loop; i++) {
		temp = off8 + (i << shift_amount);
		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
		temp = 0;
		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
		temp = word[i * scale] & 0xffffffff;
		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
		temp = (word[i * scale] >> 32) & 0xffffffff;
		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
		temp = word[i*scale + 1] & 0xffffffff;
		qla82xx_wr_32(ha, mem_crb +
		    MIU_TEST_AGT_WRDATA_UPPER_LO, temp);
		temp = (word[i*scale + 1] >> 32) & 0xffffffff;
		qla82xx_wr_32(ha, mem_crb +
		    MIU_TEST_AGT_WRDATA_UPPER_HI, temp);

		temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
		temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);

		for (j = 0; j < MAX_CTL_CHECK; j++) {
			temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
			if ((temp & MIU_TA_CTL_BUSY) == 0)
				break;
		}

		if (j >= MAX_CTL_CHECK) {
			if (printk_ratelimit())
				dev_err(&ha->pdev->dev,
				    "failed to write through agent\n");
			ret = -1;
			break;
		}
	}

	return ret;
}

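/*
 * Copy the bootloader from the flash region at ha->flt_region_bootload
 * into card memory starting at BOOTLD_START, eight bytes per transfer,
 * then kick the hardware by writing the peg-0 and global software-reset
 * registers.
 */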
static int
qla82xx_fw_load_from_flash(struct qla_hw_data *ha)
{
	int i;
	long size = 0;
	long flashaddr = ha->flt_region_bootload << 2;
	long memaddr = BOOTLD_START;
	u64 data;
	u32 high, low;
	size = (IMAGE_START - BOOTLD_START) / 8;

	for (i = 0; i < size; i++) {
		if ((qla82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
		    (qla82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) {
			return -1;
		}
		data = ((u64)high << 32) | low;
		qla82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
		flashaddr += 8;
		memaddr += 8;

		if (i % 0x1000 == 0)
			msleep(1);
	}
	udelay(100);
	read_lock(&ha->hw_lock);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
	read_unlock(&ha->hw_lock);
	return 0;
}

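/*
 * Read from card memory (DDR or QDR) through the MIU test agent: program
 * the 16-byte-aligned address, start the agent, poll MIU_TEST_AGT_CTRL
 * until the busy bit clears, collect the data from the RDDATA registers
 * and extract the requested 1/2/4/8 bytes.
 */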
int
qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
	u64 off, void *data, int size)
{
	int i, j = 0, k, start, end, loop, sz[2], off0[2];
	int shift_amount;
	uint32_t temp;
	uint64_t off8, val, mem_crb, word[2] = {0, 0};

	/*
	 * If not MN, go check for MS or invalid.
	 */

	if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
		mem_crb = QLA82XX_CRB_QDR_NET;
	else {
		mem_crb = QLA82XX_CRB_DDR_NET;
		if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
			return qla82xx_pci_mem_read_direct(ha,
			    off, data, size);
	}

	off8 = off & 0xfffffff0;
	off0[0] = off & 0xf;
	sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]);
	shift_amount = 4;
	loop = ((off0[0] + size - 1) >> shift_amount) + 1;
	off0[1] = 0;
	sz[1] = size - sz[0];

	for (i = 0; i < loop; i++) {
		temp = off8 + (i << shift_amount);
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
		temp = 0;
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
		temp = MIU_TA_CTL_ENABLE;
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
		temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);

		for (j = 0; j < MAX_CTL_CHECK; j++) {
			temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
			if ((temp & MIU_TA_CTL_BUSY) == 0)
				break;
		}

		if (j >= MAX_CTL_CHECK) {
			if (printk_ratelimit())
				dev_err(&ha->pdev->dev,
				    "failed to read through agent\n");
			break;
		}

		start = off0[i] >> 2;
		end = (off0[i] + sz[i] - 1) >> 2;
		for (k = start; k <= end; k++) {
			temp = qla82xx_rd_32(ha,
			    mem_crb + MIU_TEST_AGT_RDDATA(k));
			word[i] |= ((uint64_t)temp << (32 * (k & 1)));
		}
	}

	if (j >= MAX_CTL_CHECK)
		return -1;

	if ((off0[0] & 7) == 0) {
		val = word[0];
	} else {
		val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
			((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
	}

	switch (size) {
	case 1:
		*(uint8_t *)data = val;
		break;
	case 2:
		*(uint16_t *)data = val;
		break;
	case 4:
		*(uint32_t *)data = val;
		break;
	case 8:
		*(uint64_t *)data = val;
		break;
	}
	return 0;
}

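/*
 * Helpers for parsing the unified ROM image (URI) firmware blob: a
 * directory of table descriptors sits at the start of the image, and
 * each table in turn locates data descriptors (bootloader, firmware,
 * product table) via its findex/entry_size fields, as consumed by the
 * qla82xx_get_*() routines below.
 */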
static struct qla82xx_uri_table_desc *
qla82xx_get_table_desc(const u8 *unirom, int section)
{
	uint32_t i;
	struct qla82xx_uri_table_desc *directory =
		(struct qla82xx_uri_table_desc *)&unirom[0];
	__le32 offset;
	__le32 tab_type;
	__le32 entries = cpu_to_le32(directory->num_entries);

	for (i = 0; i < entries; i++) {
		offset = cpu_to_le32(directory->findex) +
		    (i * cpu_to_le32(directory->entry_size));
		tab_type = cpu_to_le32(*((u32 *)&unirom[offset] + 8));

		if (tab_type == section)
			return (struct qla82xx_uri_table_desc *)&unirom[offset];
	}

	return NULL;
}

static struct qla82xx_uri_data_desc *
qla82xx_get_data_desc(struct qla_hw_data *ha,
	u32 section, u32 idx_offset)
{
	const u8 *unirom = ha->hablob->fw->data;
	int idx = cpu_to_le32(*((int *)&unirom[ha->file_prd_off] + idx_offset));
	struct qla82xx_uri_table_desc *tab_desc = NULL;
	__le32 offset;

	tab_desc = qla82xx_get_table_desc(unirom, section);
	if (!tab_desc)
		return NULL;

	offset = cpu_to_le32(tab_desc->findex) +
	    (cpu_to_le32(tab_desc->entry_size) * idx);

	return (struct qla82xx_uri_data_desc *)&unirom[offset];
}

static u8 *
qla82xx_get_bootld_offset(struct qla_hw_data *ha)
{
	u32 offset = BOOTLD_START;
	struct qla82xx_uri_data_desc *uri_desc = NULL;

	if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
		uri_desc = qla82xx_get_data_desc(ha,
		    QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF);
		if (uri_desc)
			offset = cpu_to_le32(uri_desc->findex);
	}

	return (u8 *)&ha->hablob->fw->data[offset];
}

static __le32
qla82xx_get_fw_size(struct qla_hw_data *ha)
{
	struct qla82xx_uri_data_desc *uri_desc = NULL;

	if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
		uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
		    QLA82XX_URI_FIRMWARE_IDX_OFF);
		if (uri_desc)
			return cpu_to_le32(uri_desc->size);
	}

	return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]);
}

static u8 *
qla82xx_get_fw_offs(struct qla_hw_data *ha)
{
	u32 offset = IMAGE_START;
	struct qla82xx_uri_data_desc *uri_desc = NULL;

	if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
		uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
			QLA82XX_URI_FIRMWARE_IDX_OFF);
		if (uri_desc)
			offset = cpu_to_le32(uri_desc->findex);
	}

	return (u8 *)&ha->hablob->fw->data[offset];
}

/* PCI related functions */
char *
qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str)
{
	int pcie_reg;
	struct qla_hw_data *ha = vha->hw;
	char lwstr[6];
	uint16_t lnk;

	pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
	pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk);
	ha->link_width = (lnk >> 4) & 0x3f;

	strcpy(str, "PCIe (");
	strcat(str, "2.5Gb/s ");
	snprintf(lwstr, sizeof(lwstr), "x%d)", ha->link_width);
	strcat(str, lwstr);
	return str;
}

int qla82xx_pci_region_offset(struct pci_dev *pdev, int region)
{
	unsigned long val = 0;
	u32 control;

	switch (region) {
	case 0:
		val = 0;
		break;
	case 1:
		pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control);
		val = control + QLA82XX_MSIX_TBL_SPACE;
		break;
	}
	return val;
}

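/*
 * Map the ISP82xx register space: BAR0 provides the 2M CRB window
 * (ha->nx_pcibase) and the per-function register block at
 * 0xbc000 + (devfn << 11); the doorbell write pointer comes either from
 * BAR4 or, when ql2xdbwr is set, from a CAMRAM doorbell register.
 */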
int
qla82xx_iospace_config(struct qla_hw_data *ha)
{
	uint32_t len = 0;

	if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) {
		qla_printk(KERN_WARNING, ha,
			"Failed to reserve selected regions (%s)\n",
			pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Use MMIO operations for all accesses. */
	if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
		qla_printk(KERN_ERR, ha,
			"region #0 not an MMIO resource (%s), aborting\n",
			pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	len = pci_resource_len(ha->pdev, 0);
	ha->nx_pcibase =
	    (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
	if (!ha->nx_pcibase) {
		qla_printk(KERN_ERR, ha,
		    "cannot remap pcibase MMIO (%s), aborting\n",
		    pci_name(ha->pdev));
		pci_release_regions(ha->pdev);
		goto iospace_error_exit;
	}

	/* Mapping of IO base pointer */
	ha->iobase = (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
	    0xbc000 + (ha->pdev->devfn << 11));

	if (!ql2xdbwr) {
		ha->nxdb_wr_ptr =
		    (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
		    (ha->pdev->devfn << 12)), 4);
		if (!ha->nxdb_wr_ptr) {
			qla_printk(KERN_ERR, ha,
			    "cannot remap MMIO (%s), aborting\n",
			    pci_name(ha->pdev));
			pci_release_regions(ha->pdev);
			goto iospace_error_exit;
		}

		/* Mapping of IO base pointer,
		 * door bell read and write pointer
		 */
		ha->nxdb_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
		    (ha->pdev->devfn * 8);
	} else {
		ha->nxdb_wr_ptr = (ha->pdev->devfn == 6 ?
		    QLA82XX_CAMRAM_DB1 :
		    QLA82XX_CAMRAM_DB2);
	}

	ha->max_req_queues = ha->max_rsp_queues = 1;
	ha->msix_count = ha->max_rsp_queues + 1;
	return 0;

iospace_error_exit:
	return -ENOMEM;
}

/* GS related functions */

/* Initialization related functions */

/**
 * qla82xx_pci_config() - Setup ISP82xx PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla82xx_pci_config(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int ret;

	pci_set_master(ha->pdev);
	ret = pci_set_mwi(ha->pdev);
	ha->chip_revision = ha->pdev->revision;
	return 0;
}

/**
 * qla82xx_reset_chip() - Prepare the ISP82xx for reset by disabling interrupts.
 * @ha: HA context
 */
1702void
1703qla82xx_reset_chip(scsi_qla_host_t *vha)
1704{
1705 struct qla_hw_data *ha = vha->hw;
1706 ha->isp_ops->disable_intrs(ha);
1707}
1708
1709void qla82xx_config_rings(struct scsi_qla_host *vha)
1710{
1711 struct qla_hw_data *ha = vha->hw;
1712 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
1713 struct init_cb_81xx *icb;
1714 struct req_que *req = ha->req_q_map[0];
1715 struct rsp_que *rsp = ha->rsp_q_map[0];
1716
1717 /* Setup ring parameters in initialization control block. */
1718 icb = (struct init_cb_81xx *)ha->init_cb;
1719 icb->request_q_outpointer = __constant_cpu_to_le16(0);
1720 icb->response_q_inpointer = __constant_cpu_to_le16(0);
1721 icb->request_q_length = cpu_to_le16(req->length);
1722 icb->response_q_length = cpu_to_le16(rsp->length);
1723 icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1724 icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1725 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1726 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1727
Giridhar Malavalia9083012010-04-12 17:59:55 -07001728 WRT_REG_DWORD((unsigned long __iomem *)&reg->req_q_out[0], 0);
1729 WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_in[0], 0);
1730 WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_out[0], 0);
1731}
1732
Giridhar Malavalif1af6202010-05-04 15:01:34 -07001733void qla82xx_reset_adapter(struct scsi_qla_host *vha)
1734{
1735 struct qla_hw_data *ha = vha->hw;
1736 vha->flags.online = 0;
1737 qla2x00_try_to_stop_firmware(vha);
1738 ha->isp_ops->disable_intrs(ha);
1739}
1740
Giridhar Malavali77e334d2010-09-03 15:20:52 -07001741static int
1742qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
Giridhar Malavalia9083012010-04-12 17:59:55 -07001743{
1744 u64 *ptr64;
1745 u32 i, flashaddr, size;
1746 __le64 data;
1747
1748 size = (IMAGE_START - BOOTLD_START) / 8;
1749
Harish Zunjarrao9c2b2972010-05-28 15:08:23 -07001750 ptr64 = (u64 *)qla82xx_get_bootld_offset(ha);
Giridhar Malavalia9083012010-04-12 17:59:55 -07001751 flashaddr = BOOTLD_START;
1752
1753 for (i = 0; i < size; i++) {
1754 data = cpu_to_le64(ptr64[i]);
Harish Zunjarrao9c2b2972010-05-28 15:08:23 -07001755 if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
1756 return -EIO;
Giridhar Malavalia9083012010-04-12 17:59:55 -07001757 flashaddr += 8;
1758 }
1759
Giridhar Malavalia9083012010-04-12 17:59:55 -07001760 flashaddr = FLASH_ADDR_START;
Harish Zunjarrao9c2b2972010-05-28 15:08:23 -07001761 size = (__force u32)qla82xx_get_fw_size(ha) / 8;
1762 ptr64 = (u64 *)qla82xx_get_fw_offs(ha);
Giridhar Malavalia9083012010-04-12 17:59:55 -07001763
1764 for (i = 0; i < size; i++) {
1765 data = cpu_to_le64(ptr64[i]);
1766
1767 if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
1768 return -EIO;
1769 flashaddr += 8;
1770 }
Harish Zunjarrao9c2b2972010-05-28 15:08:23 -07001771 udelay(100);
Giridhar Malavalia9083012010-04-12 17:59:55 -07001772
1773 /* Write a magic value to CAMRAM register
1774 * at a specified offset to indicate
1775 * that all data is written and
1776 * ready for firmware to initialize.
1777 */
Harish Zunjarrao9c2b2972010-05-28 15:08:23 -07001778 qla82xx_wr_32(ha, QLA82XX_CAM_RAM(0x1fc), QLA82XX_BDINFO_MAGIC);
Giridhar Malavalia9083012010-04-12 17:59:55 -07001779
Harish Zunjarrao9c2b2972010-05-28 15:08:23 -07001780 read_lock(&ha->hw_lock);
Giridhar Malavali37113332010-07-23 15:28:34 +05001781 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
1782 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
Harish Zunjarrao9c2b2972010-05-28 15:08:23 -07001783 read_unlock(&ha->hw_lock);
1784 return 0;
1785}
1786
1787static int
1788qla82xx_set_product_offset(struct qla_hw_data *ha)
1789{
1790 struct qla82xx_uri_table_desc *ptab_desc = NULL;
1791 const uint8_t *unirom = ha->hablob->fw->data;
1792 uint32_t i;
1793 __le32 entries;
1794 __le32 flags, file_chiprev, offset;
1795 uint8_t chiprev = ha->chip_revision;
1796 /* Hardcoding mn_present flag for P3P */
1797 int mn_present = 0;
1798 uint32_t flagbit;
1799
1800 ptab_desc = qla82xx_get_table_desc(unirom,
1801 QLA82XX_URI_DIR_SECT_PRODUCT_TBL);
1802 if (!ptab_desc)
1803 return -1;
1804
1805 entries = cpu_to_le32(ptab_desc->num_entries);
1806
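	/* Scan the product table for an entry matching this chip revision
	 * and the expected flag bit.
	 */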
1807 for (i = 0; i < entries; i++) {
1808 offset = cpu_to_le32(ptab_desc->findex) +
1809 (i * cpu_to_le32(ptab_desc->entry_size));
1810 flags = cpu_to_le32(*((int *)&unirom[offset] +
1811 QLA82XX_URI_FLAGS_OFF));
1812 file_chiprev = cpu_to_le32(*((int *)&unirom[offset] +
1813 QLA82XX_URI_CHIP_REV_OFF));
1814
1815 flagbit = mn_present ? 1 : 2;
1816
1817 if ((chiprev == file_chiprev) && ((1ULL << flagbit) & flags)) {
1818 ha->file_prd_off = offset;
1819 return 0;
1820 }
1821 }
1822 return -1;
1823}
1824
1825int
1826qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
1827{
1828 __le32 val;
1829 uint32_t min_size;
1830 struct qla_hw_data *ha = vha->hw;
1831 const struct firmware *fw = ha->hablob->fw;
1832
1833 ha->fw_type = fw_type;
1834
1835 if (fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1836 if (qla82xx_set_product_offset(ha))
1837 return -EINVAL;
1838
1839 min_size = QLA82XX_URI_FW_MIN_SIZE;
1840 } else {
1841 val = cpu_to_le32(*(u32 *)&fw->data[QLA82XX_FW_MAGIC_OFFSET]);
1842 if ((__force u32)val != QLA82XX_BDINFO_MAGIC)
1843 return -EINVAL;
1844
1845 min_size = QLA82XX_FW_MIN_SIZE;
1846 }
1847
1848 if (fw->size < min_size)
1849 return -EINVAL;
Giridhar Malavalia9083012010-04-12 17:59:55 -07001850 return 0;
1851}
1852
Giridhar Malavali77e334d2010-09-03 15:20:52 -07001853static int
1854qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
Giridhar Malavalia9083012010-04-12 17:59:55 -07001855{
1856 u32 val = 0;
1857 int retries = 60;
1858
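	/* Poll CRB_CMDPEG_STATE every 500ms; 60 retries give the command
	 * peg up to 30 seconds to initialize.
	 */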
1859 do {
1860 read_lock(&ha->hw_lock);
1861 val = qla82xx_rd_32(ha, CRB_CMDPEG_STATE);
1862 read_unlock(&ha->hw_lock);
1863
1864 switch (val) {
1865 case PHAN_INITIALIZE_COMPLETE:
1866 case PHAN_INITIALIZE_ACK:
1867 return QLA_SUCCESS;
1868 case PHAN_INITIALIZE_FAILED:
1869 break;
1870 default:
1871 break;
1872 }
1873 qla_printk(KERN_WARNING, ha,
1874 "CRB_CMDPEG_STATE: 0x%x and retries: 0x%x\n",
1875 val, retries);
1876
1877 msleep(500);
1878
1879 } while (--retries);
1880
1881 qla_printk(KERN_INFO, ha,
1882 "Cmd Peg initialization failed: 0x%x.\n", val);
1883
Giridhar Malavalia9083012010-04-12 17:59:55 -07001884 val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
1885 read_lock(&ha->hw_lock);
1886 qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
1887 read_unlock(&ha->hw_lock);
1888 return QLA_FUNCTION_FAILED;
1889}
1890
Giridhar Malavali77e334d2010-09-03 15:20:52 -07001891static int
1892qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
Giridhar Malavalia9083012010-04-12 17:59:55 -07001893{
1894 u32 val = 0;
1895 int retries = 60;
1896
1897 do {
1898 read_lock(&ha->hw_lock);
1899 val = qla82xx_rd_32(ha, CRB_RCVPEG_STATE);
1900 read_unlock(&ha->hw_lock);
1901
1902 switch (val) {
1903 case PHAN_INITIALIZE_COMPLETE:
1904 case PHAN_INITIALIZE_ACK:
1905 return QLA_SUCCESS;
1906 case PHAN_INITIALIZE_FAILED:
1907 break;
1908 default:
1909 break;
1910 }
1911
1912 qla_printk(KERN_WARNING, ha,
1913 "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x\n",
1914 val, retries);
1915
1916 msleep(500);
1917
1918 } while (--retries);
1919
1920 qla_printk(KERN_INFO, ha,
1921 "Rcv Peg initialization failed: 0x%x.\n", val);
1922 read_lock(&ha->hw_lock);
1923 qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
1924 read_unlock(&ha->hw_lock);
1925 return QLA_FUNCTION_FAILED;
1926}
1927
1928/* ISR related functions */
1929uint32_t qla82xx_isr_int_target_mask_enable[8] = {
1930 ISR_INT_TARGET_MASK, ISR_INT_TARGET_MASK_F1,
1931 ISR_INT_TARGET_MASK_F2, ISR_INT_TARGET_MASK_F3,
1932 ISR_INT_TARGET_MASK_F4, ISR_INT_TARGET_MASK_F5,
1933 ISR_INT_TARGET_MASK_F7, ISR_INT_TARGET_MASK_F7
1934};
1935
1936uint32_t qla82xx_isr_int_target_status[8] = {
1937 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
1938 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
1939 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
1940 ISR_INT_TARGET_STATUS_F7, ISR_INT_TARGET_STATUS_F7
1941};
1942
1943static struct qla82xx_legacy_intr_set legacy_intr[] = \
1944 QLA82XX_LEGACY_INTR_CONFIG;
1945
1946/*
1947 * qla82xx_mbx_completion() - Process mailbox command completions.
1948 * @vha: SCSI driver HA context
1949 * @mb0: Mailbox0 register
1950 */
Giridhar Malavali77e334d2010-09-03 15:20:52 -07001951static void
Giridhar Malavalia9083012010-04-12 17:59:55 -07001952qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1953{
1954 uint16_t cnt;
1955 uint16_t __iomem *wptr;
1956 struct qla_hw_data *ha = vha->hw;
1957 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
1958 wptr = (uint16_t __iomem *)&reg->mailbox_out[1];
1959
1960 /* Load return mailbox registers. */
1961 ha->flags.mbox_int = 1;
1962 ha->mailbox_out[0] = mb0;
1963
1964 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1965 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1966 wptr++;
1967 }
1968
1969 if (ha->mcp) {
1970 DEBUG3_11(printk(KERN_INFO "%s(%ld): "
1971 "Got mailbox completion. cmd=%x.\n",
1972 __func__, vha->host_no, ha->mcp->mb[0]));
1973 } else {
1974 qla_printk(KERN_INFO, ha,
1975 "%s(%ld): MBX pointer ERROR!\n",
1976 __func__, vha->host_no);
1977 }
1978}
1979
1980/*
1981 * qla82xx_intr_handler() - Process interrupts for the ISP82xx.
1982 * @irq: interrupt number
1983 * @dev_id: SCSI driver HA context (response queue)
1985 *
1986 * Called by system whenever the host adapter generates an interrupt.
1987 *
1988 * Returns handled flag.
1989 */
1990irqreturn_t
1991qla82xx_intr_handler(int irq, void *dev_id)
1992{
1993 scsi_qla_host_t *vha;
1994 struct qla_hw_data *ha;
1995 struct rsp_que *rsp;
1996 struct device_reg_82xx __iomem *reg;
1997 int status = 0, status1 = 0;
1998 unsigned long flags;
1999 unsigned long iter;
2000 uint32_t stat;
2001 uint16_t mb[4];
2002
2003 rsp = (struct rsp_que *) dev_id;
2004 if (!rsp) {
2005 printk(KERN_INFO
2006 "%s(): NULL response queue pointer\n", __func__);
2007 return IRQ_NONE;
2008 }
2009 ha = rsp->hw;
2010
2011 if (!ha->flags.msi_enabled) {
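	/* For legacy INTx, confirm the interrupt really belongs to this
	 * function before claiming it.
	 */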
2012 status = qla82xx_rd_32(ha, ISR_INT_VECTOR);
2013 if (!(status & ha->nx_legacy_intr.int_vec_bit))
2014 return IRQ_NONE;
2015
2016 status1 = qla82xx_rd_32(ha, ISR_INT_STATE_REG);
2017 if (!ISR_IS_LEGACY_INTR_TRIGGERED(status1))
2018 return IRQ_NONE;
2019 }
2020
2021 /* clear the interrupt */
2022 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
2023
2024 /* read twice to ensure write is flushed */
2025 qla82xx_rd_32(ha, ISR_INT_VECTOR);
2026 qla82xx_rd_32(ha, ISR_INT_VECTOR);
2027
2028 reg = &ha->iobase->isp82;
2029
2030 spin_lock_irqsave(&ha->hardware_lock, flags);
2031 vha = pci_get_drvdata(ha->pdev);
2032 for (iter = 1; iter--; ) {
2033
2034 if (RD_REG_DWORD(&reg->host_int)) {
2035 stat = RD_REG_DWORD(&reg->host_status);
Giridhar Malavalia9083012010-04-12 17:59:55 -07002036
2037 switch (stat & 0xff) {
2038 case 0x1:
2039 case 0x2:
2040 case 0x10:
2041 case 0x11:
2042 qla82xx_mbx_completion(vha, MSW(stat));
2043 status |= MBX_INTERRUPT;
2044 break;
2045 case 0x12:
2046 mb[0] = MSW(stat);
2047 mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2048 mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2049 mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2050 qla2x00_async_event(vha, rsp, mb);
2051 break;
2052 case 0x13:
2053 qla24xx_process_response_queue(vha, rsp);
2054 break;
2055 default:
2056 DEBUG2(printk("scsi(%ld): "
2057 " Unrecognized interrupt type (%d).\n",
2058 vha->host_no, stat & 0xff));
2059 break;
2060 }
2061 }
2062 WRT_REG_DWORD(&reg->host_int, 0);
2063 }
2064 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2065 if (!ha->flags.msi_enabled)
2066 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2067
2068#ifdef QL_DEBUG_LEVEL_17
2069 if (!irq && ha->flags.eeh_busy)
2070 qla_printk(KERN_WARNING, ha,
2071 "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
2072 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2073#endif
2074
2075 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2076 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2077 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2078 complete(&ha->mbx_intr_comp);
2079 }
2080 return IRQ_HANDLED;
2081}
2082
2083irqreturn_t
2084qla82xx_msix_default(int irq, void *dev_id)
2085{
2086 scsi_qla_host_t *vha;
2087 struct qla_hw_data *ha;
2088 struct rsp_que *rsp;
2089 struct device_reg_82xx __iomem *reg;
2090 int status = 0;
2091 unsigned long flags;
2092 uint32_t stat;
2093 uint16_t mb[4];
2094
2095 rsp = (struct rsp_que *) dev_id;
2096 if (!rsp) {
2097 printk(KERN_INFO
2098 "%s(): NULL response queue pointer\n", __func__);
2099 return IRQ_NONE;
2100 }
2101 ha = rsp->hw;
2102
2103 reg = &ha->iobase->isp82;
2104
2105 spin_lock_irqsave(&ha->hardware_lock, flags);
2106 vha = pci_get_drvdata(ha->pdev);
2107 do {
2108 if (RD_REG_DWORD(&reg->host_int)) {
2109 stat = RD_REG_DWORD(&reg->host_status);
Giridhar Malavalia9083012010-04-12 17:59:55 -07002110
2111 switch (stat & 0xff) {
2112 case 0x1:
2113 case 0x2:
2114 case 0x10:
2115 case 0x11:
2116 qla82xx_mbx_completion(vha, MSW(stat));
2117 status |= MBX_INTERRUPT;
2118 break;
2119 case 0x12:
2120 mb[0] = MSW(stat);
2121 mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2122 mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2123 mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2124 qla2x00_async_event(vha, rsp, mb);
2125 break;
2126 case 0x13:
2127 qla24xx_process_response_queue(vha, rsp);
2128 break;
2129 default:
2130 DEBUG2(printk("scsi(%ld): "
2131 " Unrecognized interrupt type (%d).\n",
2132 vha->host_no, stat & 0xff));
2133 break;
2134 }
2135 }
2136 WRT_REG_DWORD(&reg->host_int, 0);
2137 } while (0);
2138
2139 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2140
2141#ifdef QL_DEBUG_LEVEL_17
2142 if (!irq && ha->flags.eeh_busy)
2143 qla_printk(KERN_WARNING, ha,
2144 "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
2145 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2146#endif
2147
2148 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2149 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2150 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2151 complete(&ha->mbx_intr_comp);
2152 }
2153 return IRQ_HANDLED;
2154}
2155
2156irqreturn_t
2157qla82xx_msix_rsp_q(int irq, void *dev_id)
2158{
2159 scsi_qla_host_t *vha;
2160 struct qla_hw_data *ha;
2161 struct rsp_que *rsp;
2162 struct device_reg_82xx __iomem *reg;
2163
2164 rsp = (struct rsp_que *) dev_id;
2165 if (!rsp) {
2166 printk(KERN_INFO
2167 "%s(): NULL response queue pointer\n", __func__);
2168 return IRQ_NONE;
2169 }
2170
2171 ha = rsp->hw;
2172 reg = &ha->iobase->isp82;
2173 spin_lock_irq(&ha->hardware_lock);
2174 vha = pci_get_drvdata(ha->pdev);
2175 qla24xx_process_response_queue(vha, rsp);
2176 WRT_REG_DWORD(&reg->host_int, 0);
2177 spin_unlock_irq(&ha->hardware_lock);
2178 return IRQ_HANDLED;
2179}
2180
2181void
2182qla82xx_poll(int irq, void *dev_id)
2183{
2184 scsi_qla_host_t *vha;
2185 struct qla_hw_data *ha;
2186 struct rsp_que *rsp;
2187 struct device_reg_82xx __iomem *reg;
2188 int status = 0;
2189 uint32_t stat;
2190 uint16_t mb[4];
2191 unsigned long flags;
2192
2193 rsp = (struct rsp_que *) dev_id;
2194 if (!rsp) {
2195 printk(KERN_INFO
2196 "%s(): NULL response queue pointer\n", __func__);
2197 return;
2198 }
2199 ha = rsp->hw;
2200
2201 reg = &ha->iobase->isp82;
2202 spin_lock_irqsave(&ha->hardware_lock, flags);
2203 vha = pci_get_drvdata(ha->pdev);
2204
2205 if (RD_REG_DWORD(&reg->host_int)) {
2206 stat = RD_REG_DWORD(&reg->host_status);
2207 switch (stat & 0xff) {
2208 case 0x1:
2209 case 0x2:
2210 case 0x10:
2211 case 0x11:
2212 qla82xx_mbx_completion(vha, MSW(stat));
2213 status |= MBX_INTERRUPT;
2214 break;
2215 case 0x12:
2216 mb[0] = MSW(stat);
2217 mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2218 mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2219 mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2220 qla2x00_async_event(vha, rsp, mb);
2221 break;
2222 case 0x13:
2223 qla24xx_process_response_queue(vha, rsp);
2224 break;
2225 default:
2226 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
2227 "(%d).\n",
2228 vha->host_no, stat & 0xff));
2229 break;
2230 }
2231 }
2232 WRT_REG_DWORD(&reg->host_int, 0);
2233 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2234}
2235
2236void
2237qla82xx_enable_intrs(struct qla_hw_data *ha)
2238{
2239 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2240 qla82xx_mbx_intr_enable(vha);
2241 spin_lock_irq(&ha->hardware_lock);
2242 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2243 spin_unlock_irq(&ha->hardware_lock);
2244 ha->interrupts_on = 1;
2245}
2246
2247void
2248qla82xx_disable_intrs(struct qla_hw_data *ha)
2249{
2250 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2251 qla82xx_mbx_intr_disable(vha);
2252 spin_lock_irq(&ha->hardware_lock);
2253 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
2254 spin_unlock_irq(&ha->hardware_lock);
2255 ha->interrupts_on = 0;
2256}
2257
2258void qla82xx_init_flags(struct qla_hw_data *ha)
2259{
2260 struct qla82xx_legacy_intr_set *nx_legacy_intr;
2261
2262 /* ISP 8021 initializations */
2263 rwlock_init(&ha->hw_lock);
2264 ha->qdr_sn_window = -1;
2265 ha->ddr_mn_window = -1;
2266 ha->curr_window = 255;
2267 ha->portnum = PCI_FUNC(ha->pdev->devfn);
2268 nx_legacy_intr = &legacy_intr[ha->portnum];
2269 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
2270 ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg;
2271 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
2272 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
2273}
2274
Lalit Chandivadea5b36322010-09-03 15:20:50 -07002275inline void
Giridhar Malavalia9083012010-04-12 17:59:55 -07002276qla82xx_set_drv_active(scsi_qla_host_t *vha)
2277{
2278 uint32_t drv_active;
2279 struct qla_hw_data *ha = vha->hw;
2280
2281 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2282
2283 /* If reset value is all FF's, initialize DRV_ACTIVE */
2284 if (drv_active == 0xffffffff) {
Giridhar Malavali77e334d2010-09-03 15:20:52 -07002285 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE,
2286 QLA82XX_DRV_NOT_ACTIVE);
Giridhar Malavalia9083012010-04-12 17:59:55 -07002287 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2288 }
Giridhar Malavali77e334d2010-09-03 15:20:52 -07002289 drv_active |= (QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
Giridhar Malavalia9083012010-04-12 17:59:55 -07002290 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2291}
2292
2293inline void
2294qla82xx_clear_drv_active(struct qla_hw_data *ha)
2295{
2296 uint32_t drv_active;
2297
2298 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
Giridhar Malavali77e334d2010-09-03 15:20:52 -07002299 drv_active &= ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
Giridhar Malavalia9083012010-04-12 17:59:55 -07002300 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2301}
2302
2303static inline int
2304qla82xx_need_reset(struct qla_hw_data *ha)
2305{
2306 uint32_t drv_state;
2307 int rval;
2308
2309 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
Giridhar Malavali77e334d2010-09-03 15:20:52 -07002310 rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
Giridhar Malavalia9083012010-04-12 17:59:55 -07002311 return rval;
2312}
2313
2314static inline void
2315qla82xx_set_rst_ready(struct qla_hw_data *ha)
2316{
2317 uint32_t drv_state;
2318 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2319
2320 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2321
2322 /* If reset value is all FF's, initialize DRV_STATE */
2323 if (drv_state == 0xffffffff) {
Giridhar Malavali77e334d2010-09-03 15:20:52 -07002324 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, QLA82XX_DRVST_NOT_RDY);
Giridhar Malavalia9083012010-04-12 17:59:55 -07002325 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2326 }
2327 drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2328 qla_printk(KERN_INFO, ha,
2329 "%s(%ld):drv_state = 0x%x\n",
2330 __func__, vha->host_no, drv_state);
2331 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2332}
2333
2334static inline void
2335qla82xx_clear_rst_ready(struct qla_hw_data *ha)
2336{
2337 uint32_t drv_state;
2338
2339 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2340 drv_state &= ~(QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2341 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2342}
2343
2344static inline void
2345qla82xx_set_qsnt_ready(struct qla_hw_data *ha)
2346{
2347 uint32_t qsnt_state;
2348
2349 qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2350 qsnt_state |= (QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
2351 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
2352}
2353
Saurav Kashyap579d12b2010-12-21 16:00:14 -08002354void
2355qla82xx_clear_qsnt_ready(scsi_qla_host_t *vha)
2356{
2357 struct qla_hw_data *ha = vha->hw;
2358 uint32_t qsnt_state;
2359
2360 qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2361 qsnt_state &= ~(QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
2362 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
2363}
2364
Giridhar Malavali77e334d2010-09-03 15:20:52 -07002365static int
2366qla82xx_load_fw(scsi_qla_host_t *vha)
Giridhar Malavalia9083012010-04-12 17:59:55 -07002367{
2368 int rst;
2369 struct fw_blob *blob;
2370 struct qla_hw_data *ha = vha->hw;
2371
Giridhar Malavalia9083012010-04-12 17:59:55 -07002372 if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) {
2373 qla_printk(KERN_ERR, ha,
2374 "%s: Error during CRB Initialization\n", __func__);
2375 return QLA_FUNCTION_FAILED;
2376 }
2377 udelay(500);
2378
2379 /* Bring QM and CAMRAM out of reset */
2380 rst = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
2381 rst &= ~((1 << 28) | (1 << 24));
2382 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);
2383
2384 /*
2385 * FW Load priority:
2386 * 1) Operational firmware residing in flash.
2387 * 2) Firmware via request-firmware interface (.bin file).
2388 */
2389 if (ql2xfwloadbin == 2)
2390 goto try_blob_fw;
2391
2392 qla_printk(KERN_INFO, ha,
2393 "Attempting to load firmware from flash\n");
2394
2395 if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
2396 qla_printk(KERN_ERR, ha,
2397 "Firmware loaded successfully from flash\n");
2398 return QLA_SUCCESS;
2399 }
2400try_blob_fw:
2401 qla_printk(KERN_INFO, ha,
2402 "Attempting to load firmware from blob\n");
2403
2404 /* Load firmware blob. */
2405 blob = ha->hablob = qla2x00_request_firmware(vha);
2406 if (!blob) {
2407 qla_printk(KERN_ERR, ha,
2408 "Firmware image not present.\n");
2409 goto fw_load_failed;
2410 }
2411
Harish Zunjarrao9c2b2972010-05-28 15:08:23 -07002412 /* Validating firmware blob */
2413 if (qla82xx_validate_firmware_blob(vha,
2414 QLA82XX_FLASH_ROMIMAGE)) {
2415 /* Fallback to URI format */
2416 if (qla82xx_validate_firmware_blob(vha,
2417 QLA82XX_UNIFIED_ROMIMAGE)) {
2418 qla_printk(KERN_ERR, ha,
2419 "No valid firmware image found!!!");
2420 return QLA_FUNCTION_FAILED;
2421 }
2422 }
2423
Giridhar Malavalia9083012010-04-12 17:59:55 -07002424 if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
2425 qla_printk(KERN_ERR, ha,
2426 "%s: Firmware loaded successfully "
2427 " from binary blob\n", __func__);
2428 return QLA_SUCCESS;
2429 } else {
2430 qla_printk(KERN_ERR, ha,
2431 "Firmware load failed from binary blob\n");
2432 blob->fw = NULL;
2433 blob = NULL;
2434 goto fw_load_failed;
2435 }
2436 return QLA_SUCCESS;
2437
2438fw_load_failed:
2439 return QLA_FUNCTION_FAILED;
2440}
2441
Lalit Chandivadea5b36322010-09-03 15:20:50 -07002442int
Giridhar Malavalia9083012010-04-12 17:59:55 -07002443qla82xx_start_firmware(scsi_qla_host_t *vha)
2444{
2445 int pcie_cap;
2446 uint16_t lnk;
2447 struct qla_hw_data *ha = vha->hw;
2448
2449 /* scrub dma mask expansion register */
Giridhar Malavali77e334d2010-09-03 15:20:52 -07002450 qla82xx_wr_32(ha, CRB_DMA_SHIFT, QLA82XX_DMA_SHIFT_VALUE);
Giridhar Malavalia9083012010-04-12 17:59:55 -07002451
Giridhar Malavali37113332010-07-23 15:28:34 +05002452 /* Put both the PEG CMD and RCV PEG to default state
2453 * of 0 before resetting the hardware
2454 */
2455 qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
2456 qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
2457
Giridhar Malavalia9083012010-04-12 17:59:55 -07002458 /* Overwrite stale initialization register values */
2459 qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
2460 qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
2461
2462 if (qla82xx_load_fw(vha) != QLA_SUCCESS) {
2463 qla_printk(KERN_INFO, ha,
2464 "%s: Error trying to start fw!\n", __func__);
2465 return QLA_FUNCTION_FAILED;
2466 }
2467
2468 /* Handshake with the card before we register the devices. */
2469 if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
2470 qla_printk(KERN_INFO, ha,
2471 "%s: Error during card handshake!\n", __func__);
2472 return QLA_FUNCTION_FAILED;
2473 }
2474
2475 /* Negotiated Link width */
2476 pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
2477 pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk);
2478 ha->link_width = (lnk >> 4) & 0x3f;
2479
2480 /* Synchronize with Receive peg */
2481 return qla82xx_check_rcvpeg_state(ha);
2482}
2483
2484static inline int
2485qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
2486 uint16_t tot_dsds)
2487{
2488 uint32_t *cur_dsd = NULL;
2489 scsi_qla_host_t *vha;
2490 struct qla_hw_data *ha;
2491 struct scsi_cmnd *cmd;
2492 struct scatterlist *cur_seg;
2493 uint32_t *dsd_seg;
2494 void *next_dsd;
2495 uint8_t avail_dsds;
2496 uint8_t first_iocb = 1;
2497 uint32_t dsd_list_len;
2498 struct dsd_dma *dsd_ptr;
2499 struct ct6_dsd *ctx;
2500
2501 cmd = sp->cmd;
2502
2503 /* Update entry type to indicate Command Type 6 IOCB */
2504 *((uint32_t *)(&cmd_pkt->entry_type)) =
2505 __constant_cpu_to_le32(COMMAND_TYPE_6);
2506
2507 /* No data transfer */
2508 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
2509 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
2510 return 0;
2511 }
2512
2513 vha = sp->fcport->vha;
2514 ha = vha->hw;
2515
2516 /* Set transfer direction */
2517 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
2518 cmd_pkt->control_flags =
2519 __constant_cpu_to_le16(CF_WRITE_DATA);
2520 ha->qla_stats.output_bytes += scsi_bufflen(cmd);
2521 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
2522 cmd_pkt->control_flags =
2523 __constant_cpu_to_le16(CF_READ_DATA);
2524 ha->qla_stats.input_bytes += scsi_bufflen(cmd);
2525 }
2526
2527 cur_seg = scsi_sglist(cmd);
2528 ctx = sp->ctx;
2529
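	/* Carve the scatter list into DSD lists of at most QLA_DSDS_PER_IOCB
	 * entries, pulling pre-allocated list buffers from the global pool.
	 */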
2530 while (tot_dsds) {
2531 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
2532 QLA_DSDS_PER_IOCB : tot_dsds;
2533 tot_dsds -= avail_dsds;
2534 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
2535
2536 dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
2537 struct dsd_dma, list);
2538 next_dsd = dsd_ptr->dsd_addr;
2539 list_del(&dsd_ptr->list);
2540 ha->gbl_dsd_avail--;
2541 list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
2542 ctx->dsd_use_cnt++;
2543 ha->gbl_dsd_inuse++;
2544
2545 if (first_iocb) {
2546 first_iocb = 0;
2547 dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2548 *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2549 *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2550 *dsd_seg++ = dsd_list_len;
2551 } else {
2552 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2553 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2554 *cur_dsd++ = dsd_list_len;
2555 }
2556 cur_dsd = (uint32_t *)next_dsd;
2557 while (avail_dsds) {
2558 dma_addr_t sle_dma;
2559
2560 sle_dma = sg_dma_address(cur_seg);
2561 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2562 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2563 *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
2564 cur_seg++;
2565 avail_dsds--;
2566 }
2567 }
2568
2569 /* Null termination */
2570 *cur_dsd++ = 0;
2571 *cur_dsd++ = 0;
2572 *cur_dsd++ = 0;
2573 cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
2574 return 0;
2575}
2576
2577/*
2578 * qla82xx_calc_dsd_lists() - Determine the number of DSD lists required
2579 * for Command Type 6.
2580 *
2581 * @dsds: number of data segment descriptors needed
2582 *
2583 * Returns the number of DSD lists needed to store @dsds.
2584 */
2585inline uint16_t
2586qla82xx_calc_dsd_lists(uint16_t dsds)
2587{
2588 uint16_t dsd_lists = 0;
2589
2590 dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
2591 if (dsds % QLA_DSDS_PER_IOCB)
2592 dsd_lists++;
2593 return dsd_lists;
2594}
2595
2596/*
2597 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2598 * @sp: command to send to the ISP
2599 *
2600 * Returns non-zero if a failure occurred, else zero.
2601 */
2602int
2603qla82xx_start_scsi(srb_t *sp)
2604{
2605 int ret, nseg;
2606 unsigned long flags;
2607 struct scsi_cmnd *cmd;
2608 uint32_t *clr_ptr;
2609 uint32_t index;
2610 uint32_t handle;
2611 uint16_t cnt;
2612 uint16_t req_cnt;
2613 uint16_t tot_dsds;
2614 struct device_reg_82xx __iomem *reg;
2615 uint32_t dbval;
2616 uint32_t *fcp_dl;
2617 uint8_t additional_cdb_len;
2618 struct ct6_dsd *ctx;
2619 struct scsi_qla_host *vha = sp->fcport->vha;
2620 struct qla_hw_data *ha = vha->hw;
2621 struct req_que *req = NULL;
2622 struct rsp_que *rsp = NULL;
2623
2624 /* Setup device pointers. */
2625 ret = 0;
2626 reg = &ha->iobase->isp82;
2627 cmd = sp->cmd;
2628 req = vha->req;
2629 rsp = ha->rsp_q_map[0];
2630
2631 /* So we know we haven't pci_map'ed anything yet */
2632 tot_dsds = 0;
2633
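	/* Doorbell value: 0x04 plus the PCI function number; the request
	 * queue id and ring index are OR'ed in before the doorbell is rung.
	 */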
2634 dbval = 0x04 | (ha->portnum << 5);
2635
2636 /* Send marker if required */
2637 if (vha->marker_needed != 0) {
2638 if (qla2x00_marker(vha, req,
2639 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
2640 return QLA_FUNCTION_FAILED;
2641 vha->marker_needed = 0;
2642 }
2643
2644 /* Acquire ring specific lock */
2645 spin_lock_irqsave(&ha->hardware_lock, flags);
2646
2647 /* Check for room in outstanding command list. */
2648 handle = req->current_outstanding_cmd;
2649 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
2650 handle++;
2651 if (handle == MAX_OUTSTANDING_COMMANDS)
2652 handle = 1;
2653 if (!req->outstanding_cmds[handle])
2654 break;
2655 }
2656 if (index == MAX_OUTSTANDING_COMMANDS)
2657 goto queuing_error;
2658
2659 /* Map the sg table so we have an accurate count of sg entries needed */
2660 if (scsi_sg_count(cmd)) {
2661 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2662 scsi_sg_count(cmd), cmd->sc_data_direction);
2663 if (unlikely(!nseg))
2664 goto queuing_error;
2665 } else
2666 nseg = 0;
2667
2668 tot_dsds = nseg;
2669
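	/* Large transfers use Command Type 6 with external DSD lists;
	 * smaller ones fall through to Command Type 7 with inline segments.
	 */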
2670 if (tot_dsds > ql2xshiftctondsd) {
2671 struct cmd_type_6 *cmd_pkt;
2672 uint16_t more_dsd_lists = 0;
2673 struct dsd_dma *dsd_ptr;
2674 uint16_t i;
2675
2676 more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
2677 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN)
2678 goto queuing_error;
2679
2680 if (more_dsd_lists <= ha->gbl_dsd_avail)
2681 goto sufficient_dsds;
2682 else
2683 more_dsd_lists -= ha->gbl_dsd_avail;
2684
2685 for (i = 0; i < more_dsd_lists; i++) {
2686 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2687 if (!dsd_ptr)
2688 goto queuing_error;
2689
2690 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2691 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2692 if (!dsd_ptr->dsd_addr) {
2693 kfree(dsd_ptr);
2694 goto queuing_error;
2695 }
2696 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2697 ha->gbl_dsd_avail++;
2698 }
2699
2700sufficient_dsds:
2701 req_cnt = 1;
2702
Giridhar Malavali1bd58b82010-09-03 14:57:05 -07002703 if (req->cnt < (req_cnt + 2)) {
2704 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2705 &reg->req_q_out[0]);
2706 if (req->ring_index < cnt)
2707 req->cnt = cnt - req->ring_index;
2708 else
2709 req->cnt = req->length -
2710 (req->ring_index - cnt);
2711 }
2712
2713 if (req->cnt < (req_cnt + 2))
2714 goto queuing_error;
2715
Giridhar Malavalia9083012010-04-12 17:59:55 -07002716 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2717 if (!sp->ctx) {
2718 DEBUG(printk(KERN_INFO
2719 "%s(%ld): failed to allocate"
2720 " ctx.\n", __func__, vha->host_no));
2721 goto queuing_error;
2722 }
2723 memset(ctx, 0, sizeof(struct ct6_dsd));
2724 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2725 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2726 if (!ctx->fcp_cmnd) {
2727 DEBUG2_3(printk("%s(%ld): failed to allocate"
2728 " fcp_cmnd.\n", __func__, vha->host_no));
2729 goto queuing_error_fcp_cmnd;
2730 }
2731
2732 /* Initialize the DSD list and dma handle */
2733 INIT_LIST_HEAD(&ctx->dsd_list);
2734 ctx->dsd_use_cnt = 0;
2735
2736 if (cmd->cmd_len > 16) {
2737 additional_cdb_len = cmd->cmd_len - 16;
2738 if ((cmd->cmd_len % 4) != 0) {
2739 /* A SCSI command longer than 16 bytes must be a
2740 * multiple of 4 bytes
2741 */
2742 goto queuing_error_fcp_cmnd;
2743 }
2744 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2745 } else {
2746 additional_cdb_len = 0;
2747 ctx->fcp_cmnd_len = 12 + 16 + 4;
2748 }
2749
2750 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2751 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2752
2753 /* Zero out remaining portion of packet. */
2754 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2755 clr_ptr = (uint32_t *)cmd_pkt + 2;
2756 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2757 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2758
2759 /* Set NPORT-ID and LUN number*/
2760 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2761 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2762 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2763 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2764 cmd_pkt->vp_index = sp->fcport->vp_idx;
2765
2766 /* Build IOCB segments */
2767 if (qla2xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2768 goto queuing_error_fcp_cmnd;
2769
2770 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
Mike Hernandez85727e12010-11-23 16:52:46 -08002771 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
Giridhar Malavalia9083012010-04-12 17:59:55 -07002772
2773 /* build FCP_CMND IU */
2774 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2775 int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
2776 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2777
2778 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2779 ctx->fcp_cmnd->additional_cdb_len |= 1;
2780 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2781 ctx->fcp_cmnd->additional_cdb_len |= 2;
2782
2783 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2784
2785 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2786 additional_cdb_len);
2787 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2788
2789 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2790 cmd_pkt->fcp_cmnd_dseg_address[0] =
2791 cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2792 cmd_pkt->fcp_cmnd_dseg_address[1] =
2793 cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2794
2795 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2796 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2797 /* Set total data segment count. */
2798 cmd_pkt->entry_count = (uint8_t)req_cnt;
2799 /* Specify response queue number where
2800 * completion should happen
2801 */
2802 cmd_pkt->entry_status = (uint8_t) rsp->id;
2803 } else {
2804 struct cmd_type_7 *cmd_pkt;
2805 req_cnt = qla24xx_calc_iocbs(tot_dsds);
2806 if (req->cnt < (req_cnt + 2)) {
2807 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2808 &reg->req_q_out[0]);
2809 if (req->ring_index < cnt)
2810 req->cnt = cnt - req->ring_index;
2811 else
2812 req->cnt = req->length -
2813 (req->ring_index - cnt);
2814 }
2815 if (req->cnt < (req_cnt + 2))
2816 goto queuing_error;
2817
2818 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2819 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2820
2821 /* Zero out remaining portion of packet. */
2822 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2823 clr_ptr = (uint32_t *)cmd_pkt + 2;
2824 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2825 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2826
2827 /* Set NPORT-ID and LUN number*/
2828 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2829 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2830 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2831 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2832 cmd_pkt->vp_index = sp->fcport->vp_idx;
2833
2834 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2835 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2836 sizeof(cmd_pkt->lun));
2837
2838 /* Load SCSI command packet. */
2839 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2840 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2841
2842 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2843
2844 /* Build IOCB segments */
2845 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2846
2847 /* Set total data segment count. */
2848 cmd_pkt->entry_count = (uint8_t)req_cnt;
2849 /* Specify response queue number where
2850 * completion should happen.
2851 */
2852 cmd_pkt->entry_status = (uint8_t) rsp->id;
2853
2854 }
2855 /* Build command packet. */
2856 req->current_outstanding_cmd = handle;
2857 req->outstanding_cmds[handle] = sp;
2858 sp->handle = handle;
2859 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2860 req->cnt -= req_cnt;
2861 wmb();
2862
2863 /* Adjust ring index. */
2864 req->ring_index++;
2865 if (req->ring_index == req->length) {
2866 req->ring_index = 0;
2867 req->ring_ptr = req->ring;
2868 } else
2869 req->ring_ptr++;
2870
2871 sp->flags |= SRB_DMA_VALID;
2872
2873 /* Set chip new ring index. */
2874 /* write, read and verify logic */
2875 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2876 if (ql2xdbwr)
2877 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2878 else {
2879 WRT_REG_DWORD(
2880 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2881 dbval);
2882 wmb();
2883 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
2884 WRT_REG_DWORD(
2885 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2886 dbval);
2887 wmb();
2888 }
2889 }
2890
2891 /* Manage unprocessed RIO/ZIO commands in response queue. */
2892 if (vha->flags.process_response_queue &&
2893 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2894 qla24xx_process_response_queue(vha, rsp);
2895
2896 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2897 return QLA_SUCCESS;
2898
2899queuing_error_fcp_cmnd:
2900 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2901queuing_error:
2902 if (tot_dsds)
2903 scsi_dma_unmap(cmd);
2904
2905 if (sp->ctx) {
2906 mempool_free(sp->ctx, ha->ctx_mempool);
2907 sp->ctx = NULL;
2908 }
2909 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2910
2911 return QLA_FUNCTION_FAILED;
2912}
2913
Giridhar Malavali77e334d2010-09-03 15:20:52 -07002914static uint32_t *
Giridhar Malavalia9083012010-04-12 17:59:55 -07002915qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
2916 uint32_t length)
2917{
2918 uint32_t i;
2919 uint32_t val;
2920 struct qla_hw_data *ha = vha->hw;
2921
2922 /* Dword reads to flash. */
2923 for (i = 0; i < length/4; i++, faddr += 4) {
2924 if (qla82xx_rom_fast_read(ha, faddr, &val)) {
2925 qla_printk(KERN_WARNING, ha,
2926 "Do ROM fast read failed\n");
2927 goto done_read;
2928 }
2929 dwptr[i] = __constant_cpu_to_le32(val);
2930 }
2931done_read:
2932 return dwptr;
2933}
2934
Giridhar Malavali77e334d2010-09-03 15:20:52 -07002935static int
Giridhar Malavalia9083012010-04-12 17:59:55 -07002936qla82xx_unprotect_flash(struct qla_hw_data *ha)
2937{
2938 int ret;
2939 uint32_t val;
2940
2941 ret = ql82xx_rom_lock_d(ha);
2942 if (ret < 0) {
2943 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
2944 return ret;
2945 }
2946
2947 ret = qla82xx_read_status_reg(ha, &val);
2948 if (ret < 0)
2949 goto done_unprotect;
2950
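	/* Clear the block-protect bits in the flash status register so all
	 * sectors can be written.
	 */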
Lalit Chandivade0547fb32010-05-28 15:08:26 -07002951 val &= ~(BLOCK_PROTECT_BITS << 2);
Giridhar Malavalia9083012010-04-12 17:59:55 -07002952 ret = qla82xx_write_status_reg(ha, val);
2953 if (ret < 0) {
Lalit Chandivade0547fb32010-05-28 15:08:26 -07002954 val |= (BLOCK_PROTECT_BITS << 2);
Giridhar Malavalia9083012010-04-12 17:59:55 -07002955 qla82xx_write_status_reg(ha, val);
2956 }
2957
2958 if (qla82xx_write_disable_flash(ha) != 0)
2959 qla_printk(KERN_WARNING, ha, "Write disable failed\n");
2960
2961done_unprotect:
2962 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
2963 return ret;
2964}
2965
Giridhar Malavali77e334d2010-09-03 15:20:52 -07002966static int
Giridhar Malavalia9083012010-04-12 17:59:55 -07002967qla82xx_protect_flash(struct qla_hw_data *ha)
2968{
2969 int ret;
2970 uint32_t val;
2971
2972 ret = ql82xx_rom_lock_d(ha);
2973 if (ret < 0) {
2974 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
2975 return ret;
2976 }
2977
2978 ret = qla82xx_read_status_reg(ha, &val);
2979 if (ret < 0)
2980 goto done_protect;
2981
Lalit Chandivade0547fb32010-05-28 15:08:26 -07002982 val |= (BLOCK_PROTECT_BITS << 2);
Giridhar Malavalia9083012010-04-12 17:59:55 -07002983 /* LOCK all sectors */
2984 ret = qla82xx_write_status_reg(ha, val);
2985 if (ret < 0)
2986 qla_printk(KERN_WARNING, ha, "Write status register failed\n");
2987
2988 if (qla82xx_write_disable_flash(ha) != 0)
2989 qla_printk(KERN_WARNING, ha, "Write disable failed\n");
2990done_protect:
2991 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
2992 return ret;
2993}
2994
Giridhar Malavali77e334d2010-09-03 15:20:52 -07002995static int
Giridhar Malavalia9083012010-04-12 17:59:55 -07002996qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
2997{
2998 int ret = 0;
2999
3000 ret = ql82xx_rom_lock_d(ha);
3001 if (ret < 0) {
3002 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
3003 return ret;
3004 }
3005
3006 qla82xx_flash_set_write_enable(ha);
3007 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
3008 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
3009 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);
3010
3011 if (qla82xx_wait_rom_done(ha)) {
3012 qla_printk(KERN_WARNING, ha,
3013 "Error waiting for rom done\n");
3014 ret = -1;
3015 goto done;
3016 }
3017 ret = qla82xx_flash_wait_write_finish(ha);
3018done:
3019 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
3020 return ret;
3021}
3022
3023/*
3024 * Address and length are in bytes
3025 */
3026uint8_t *
3027qla82xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
3028 uint32_t offset, uint32_t length)
3029{
3030 scsi_block_requests(vha->host);
3031 qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length);
3032 scsi_unblock_requests(vha->host);
3033 return buf;
3034}
3035
3036static int
3037qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3038 uint32_t faddr, uint32_t dwords)
3039{
3040 int ret;
3041 uint32_t liter;
3042 uint32_t sec_mask, rest_addr;
3043 dma_addr_t optrom_dma;
3044 void *optrom = NULL;
3045 int page_mode = 0;
3046 struct qla_hw_data *ha = vha->hw;
3047
3048 ret = -1;
3049
3050 /* Prepare burst-capable write on supported ISPs. */
3051 if (page_mode && !(faddr & 0xfff) &&
3052 dwords > OPTROM_BURST_DWORDS) {
3053 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
3054 &optrom_dma, GFP_KERNEL);
3055 if (!optrom) {
3056 qla_printk(KERN_DEBUG, ha,
3057 "Unable to allocate memory for optrom "
3058 "burst write (%x KB).\n",
3059 OPTROM_BURST_SIZE / 1024);
3060 }
3061 }
3062
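	/* rest_addr masks the offset within a flash block; a zero result
	 * below marks a sector boundary that must be erased before writing.
	 */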
3063 rest_addr = ha->fdt_block_size - 1;
3064 sec_mask = ~rest_addr;
3065
3066 ret = qla82xx_unprotect_flash(ha);
3067 if (ret) {
3068 qla_printk(KERN_WARNING, ha,
3069 "Unable to unprotect flash for update.\n");
3070 goto write_done;
3071 }
3072
3073 for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
3074 /* Are we at the beginning of a sector? */
3075 if ((faddr & rest_addr) == 0) {
3076
3077 ret = qla82xx_erase_sector(ha, faddr);
3078 if (ret) {
3079 DEBUG9(qla_printk(KERN_ERR, ha,
3080 "Unable to erase sector: "
3081 "address=%x.\n", faddr));
3082 break;
3083 }
3084 }
3085
3086 /* Go with burst-write. */
3087 if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) {
3088 /* Copy data to DMA'ble buffer. */
3089 memcpy(optrom, dwptr, OPTROM_BURST_SIZE);
3090
3091 ret = qla2x00_load_ram(vha, optrom_dma,
3092 (ha->flash_data_off | faddr),
3093 OPTROM_BURST_DWORDS);
3094 if (ret != QLA_SUCCESS) {
3095 qla_printk(KERN_WARNING, ha,
3096 "Unable to burst-write optrom segment "
3097 "(%x/%x/%llx).\n", ret,
3098 (ha->flash_data_off | faddr),
3099 (unsigned long long)optrom_dma);
3100 qla_printk(KERN_WARNING, ha,
3101 "Reverting to slow-write.\n");
3102
3103 dma_free_coherent(&ha->pdev->dev,
3104 OPTROM_BURST_SIZE, optrom, optrom_dma);
3105 optrom = NULL;
3106 } else {
3107 liter += OPTROM_BURST_DWORDS - 1;
3108 faddr += OPTROM_BURST_DWORDS - 1;
3109 dwptr += OPTROM_BURST_DWORDS - 1;
3110 continue;
3111 }
3112 }
3113
3114 ret = qla82xx_write_flash_dword(ha, faddr,
3115 cpu_to_le32(*dwptr));
3116 if (ret) {
3117 DEBUG9(printk(KERN_DEBUG "%s(%ld) Unable to program "
3118 "flash address=%x data=%x.\n", __func__,
3119 ha->host_no, faddr, *dwptr));
3120 break;
3121 }
3122 }
3123
3124 ret = qla82xx_protect_flash(ha);
3125 if (ret)
3126 qla_printk(KERN_WARNING, ha,
3127 "Unable to protect flash after update.\n");
3128write_done:
3129 if (optrom)
3130 dma_free_coherent(&ha->pdev->dev,
3131 OPTROM_BURST_SIZE, optrom, optrom_dma);
3132 return ret;
3133}
3134
3135int
3136qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
3137 uint32_t offset, uint32_t length)
3138{
3139 int rval;
3140
3141 /* Suspend HBA. */
3142 scsi_block_requests(vha->host);
3143 rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset,
3144 length >> 2);
3145 scsi_unblock_requests(vha->host);
3146
3147 /* Convert the ISP82xx return code to a generic one */
3148 if (rval)
3149 rval = QLA_FUNCTION_FAILED;
3150 else
3151 rval = QLA_SUCCESS;
3152 return rval;
3153}
3154
3155void
3156qla82xx_start_iocbs(srb_t *sp)
3157{
3158 struct qla_hw_data *ha = sp->fcport->vha->hw;
3159 struct req_que *req = ha->req_q_map[0];
3160 struct device_reg_82xx __iomem *reg;
3161 uint32_t dbval;
3162
3163 /* Adjust ring index. */
3164 req->ring_index++;
3165 if (req->ring_index == req->length) {
3166 req->ring_index = 0;
3167 req->ring_ptr = req->ring;
3168 } else
3169 req->ring_ptr++;
3170
3171 reg = &ha->iobase->isp82;
3172 dbval = 0x04 | (ha->portnum << 5);
3173
3174 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
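	/* Ring the doorbell either through the indirect qla82xx_wr_32 path
	 * or with a direct write that is read back until the value sticks.
	 */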
Giridhar Malavali69078692010-05-28 15:08:28 -07003175 if (ql2xdbwr)
3176 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
3177 else {
3178 WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
Giridhar Malavalia9083012010-04-12 17:59:55 -07003179 wmb();
Giridhar Malavali69078692010-05-28 15:08:28 -07003180 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3181 WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr,
3182 dbval);
3183 wmb();
3184 }
Giridhar Malavalia9083012010-04-12 17:59:55 -07003185 }
3186}
3187
Shyam Sundare6a42022010-09-07 20:55:32 -07003188void qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
3189{
3190 if (qla82xx_rom_lock(ha))
3191 /* Someone else is holding the lock. */
3192 qla_printk(KERN_INFO, ha, "Resetting rom_lock\n");
3193
3194 /*
3195 * Either we got the lock, or someone
3196 * else died while holding it.
3197 * In either case, unlock.
3198 */
3199 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
3200}
3201
Giridhar Malavalia9083012010-04-12 17:59:55 -07003202/*
3203 * qla82xx_device_bootstrap
3204 * Initialize device, set DEV_READY, start fw
3205 *
3206 * Note:
3207 * IDC lock must be held upon entry
3208 *
3209 * Return:
3210 * Success : 0
3211 * Failed : 1
3212 */
3213static int
3214qla82xx_device_bootstrap(scsi_qla_host_t *vha)
3215{
Shyam Sundare6a42022010-09-07 20:55:32 -07003216 int rval = QLA_SUCCESS;
3217 int i, timeout;
Giridhar Malavalia9083012010-04-12 17:59:55 -07003218 uint32_t old_count, count;
3219 struct qla_hw_data *ha = vha->hw;
Shyam Sundare6a42022010-09-07 20:55:32 -07003220 int need_reset = 0, peg_stuck = 1;
Giridhar Malavalia9083012010-04-12 17:59:55 -07003221
Shyam Sundare6a42022010-09-07 20:55:32 -07003222 need_reset = qla82xx_need_reset(ha);
Giridhar Malavalia9083012010-04-12 17:59:55 -07003223
3224 old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3225
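	/* Sample the peg-alive counter for ~2 seconds; if it never changes,
	 * the firmware pegs are considered stuck.
	 */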
3226 for (i = 0; i < 10; i++) {
3227 timeout = msleep_interruptible(200);
3228 if (timeout) {
3229 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3230 QLA82XX_DEV_FAILED);
3231 return QLA_FUNCTION_FAILED;
3232 }
3233
3234 count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3235 if (count != old_count)
Shyam Sundare6a42022010-09-07 20:55:32 -07003236 peg_stuck = 0;
3237 }
3238
3239 if (need_reset) {
3240 /* We are trying to perform a recovery here. */
3241 if (peg_stuck)
3242 qla82xx_rom_lock_recovery(ha);
3243 goto dev_initialize;
3244 } else {
3245 /* Start of day for this ha context. */
3246 if (peg_stuck) {
3247 /* Either we are the first or recovery in progress. */
3248 qla82xx_rom_lock_recovery(ha);
3249 goto dev_initialize;
3250 } else
3251 /* Firmware already running. */
Giridhar Malavalia9083012010-04-12 17:59:55 -07003252 goto dev_ready;
3253 }
3254
Shyam Sundare6a42022010-09-07 20:55:32 -07003255 return rval;
3256
Giridhar Malavalia9083012010-04-12 17:59:55 -07003257dev_initialize:
3258 /* set to DEV_INITIALIZING */
3259 qla_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
3260 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);
3261
3262 /* The driver that sets the device state to INITIALIZING also sets the IDC version */
3263 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
3264
3265 qla82xx_idc_unlock(ha);
3266 rval = qla82xx_start_firmware(vha);
3267 qla82xx_idc_lock(ha);
3268
3269 if (rval != QLA_SUCCESS) {
3270 qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
3271 qla82xx_clear_drv_active(ha);
3272 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
3273 return rval;
3274 }
3275
3276dev_ready:
3277 qla_printk(KERN_INFO, ha, "HW State: READY\n");
3278 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
3279
3280 return QLA_SUCCESS;
3281}
3282
Saurav Kashyap579d12b2010-12-21 16:00:14 -08003283/*
3284* qla82xx_need_qsnt_handler
3285* Code to start quiescence sequence
3286*
3287* Note:
3288* IDC lock must be held upon entry
3289*
3290* Return: void
3291*/
3292
3293static void
3294qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
3295{
3296 struct qla_hw_data *ha = vha->hw;
3297 uint32_t dev_state, drv_state, drv_active;
3298 unsigned long reset_timeout;
3299
3300 if (vha->flags.online) {
3301 /* Block any further I/O and wait for pending commands to complete */
3302 qla82xx_quiescent_state_cleanup(vha);
3303 }
3304
3305 /* Set the quiescence ready bit */
3306 qla82xx_set_qsnt_ready(ha);
3307
3308 /* Wait up to 30 seconds for the other functions to ack */
3309 reset_timeout = jiffies + (30 * HZ);
3310
3311 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3312 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3313 /* A function writes 2 (QSNT_RDY) to DRV_STATE when it acks, so shift drv_active left one bit to compare */
3314 drv_active = drv_active << 0x01;
3315
3316 while (drv_state != drv_active) {
3317
3318 if (time_after_eq(jiffies, reset_timeout)) {
3319 /* Quiescence timeout: the other functions did not ack, so fall
3320 * back to the DEV_READY state
3321 */
3322 qla_printk(KERN_INFO, ha,
3323 "%s: QUIESCENT TIMEOUT\n", QLA2XXX_DRIVER_NAME);
3324 qla_printk(KERN_INFO, ha,
3325 "DRV_ACTIVE:%d DRV_STATE:%d\n", drv_active,
3326 drv_state);
3327 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3328 QLA82XX_DEV_READY);
3329 qla_printk(KERN_INFO, ha,
3330 "HW State: DEV_READY\n");
3331 qla82xx_idc_unlock(ha);
3332 qla2x00_perform_loop_resync(vha);
3333 qla82xx_idc_lock(ha);
3334
3335 qla82xx_clear_qsnt_ready(vha);
3336 return;
3337 }
3338
3339 qla82xx_idc_unlock(ha);
3340 msleep(1000);
3341 qla82xx_idc_lock(ha);
3342
3343 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3344 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3345 drv_active = drv_active << 0x01;
3346 }
3347 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3348 /* Everyone acked, so set the state to DEV_QUIESCENT */
3349 if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) {
3350 qla_printk(KERN_INFO, ha, "HW State: DEV_QUIESCENT\n");
3351 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT);
3352 }
3353}
3354
3355/*
3356* qla82xx_wait_for_state_change
3357* Wait for device state to change from given current state
3358*
3359* Note:
3360* IDC lock must not be held upon entry
3361*
3362* Return:
3363* Changed device state.
3364*/
3365uint32_t
3366qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state)
3367{
3368 struct qla_hw_data *ha = vha->hw;
3369 uint32_t dev_state;
3370
3371 do {
3372 msleep(1000);
3373 qla82xx_idc_lock(ha);
3374 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3375 qla82xx_idc_unlock(ha);
3376 } while (dev_state == curr_state);
3377
3378 return dev_state;
3379}
3380
Giridhar Malavalia9083012010-04-12 17:59:55 -07003381static void
3382qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
3383{
3384 struct qla_hw_data *ha = vha->hw;
3385
3386 /* Disable the board */
3387 qla_printk(KERN_INFO, ha, "Disabling the board\n");
3388
Giridhar Malavalib9637522010-05-28 15:08:15 -07003389 qla82xx_idc_lock(ha);
3390 qla82xx_clear_drv_active(ha);
3391 qla82xx_idc_unlock(ha);
3392
Giridhar Malavalia9083012010-04-12 17:59:55 -07003393 /* Set DEV_FAILED flag to disable timer */
3394 vha->device_flags |= DFLG_DEV_FAILED;
3395 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
3396 qla2x00_mark_all_devices_lost(vha, 0);
3397 vha->flags.online = 0;
3398 vha->flags.init_done = 0;
3399}
3400
3401/*
3402 * qla82xx_need_reset_handler
3403 * Code to start reset sequence
3404 *
3405 * Note:
3406 * IDC lock must be held upon entry
3407 *
3408 * Return: void
3411 */
3412static void
3413qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3414{
3415 uint32_t dev_state, drv_state, drv_active;
3416 unsigned long reset_timeout;
3417 struct qla_hw_data *ha = vha->hw;
3418 struct req_que *req = ha->req_q_map[0];
3419
3420 if (vha->flags.online) {
3421 qla82xx_idc_unlock(ha);
3422 qla2x00_abort_isp_cleanup(vha);
3423 ha->isp_ops->get_flash_version(vha, req->ring);
3424 ha->isp_ops->nvram_config(vha);
3425 qla82xx_idc_lock(ha);
3426 }
3427
3428 qla82xx_set_rst_ready(ha);
3429
3430 /* wait for 10 seconds for reset ack from all functions */
3431 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
3432
3433 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3434 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3435
3436 while (drv_state != drv_active) {
3437 if (time_after_eq(jiffies, reset_timeout)) {
3438 qla_printk(KERN_INFO, ha,
3439 "%s: RESET TIMEOUT!\n", QLA2XXX_DRIVER_NAME);
3440 break;
3441 }
3442 qla82xx_idc_unlock(ha);
3443 msleep(1000);
3444 qla82xx_idc_lock(ha);
3445 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3446 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3447 }
3448
3449 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
Giridhar Malavalif1af6202010-05-04 15:01:34 -07003450 qla_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state,
3451 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
3452
Giridhar Malavalia9083012010-04-12 17:59:55 -07003453 /* Force to DEV_COLD unless someone else is starting a reset */
3454 if (dev_state != QLA82XX_DEV_INITIALIZING) {
3455 qla_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
3456 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
3457 }
3458}
3459
3460static void
3461qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3462{
3463 uint32_t fw_heartbeat_counter, halt_status;
3464 struct qla_hw_data *ha = vha->hw;
3465
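	/* The firmware bumps PEG_ALIVE_COUNTER while healthy; a counter that
	 * stops changing across watchdog calls indicates a firmware hang.
	 */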
3466 fw_heartbeat_counter = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
Lalit Chandivadea5b36322010-09-03 15:20:50 -07003467 /* all 0xff, assume AER/EEH in progress, ignore */
3468 if (fw_heartbeat_counter == 0xffffffff)
3469 return;
Giridhar Malavalia9083012010-04-12 17:59:55 -07003470 if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
3471 vha->seconds_since_last_heartbeat++;
3472 /* FW not alive after 2 seconds */
3473 if (vha->seconds_since_last_heartbeat == 2) {
3474 vha->seconds_since_last_heartbeat = 0;
3475 halt_status = qla82xx_rd_32(ha,
3476 QLA82XX_PEG_HALT_STATUS1);
3477 if (halt_status & HALT_STATUS_UNRECOVERABLE) {
3478 set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
3479 } else {
3480 qla_printk(KERN_INFO, ha,
3481 "scsi(%ld): %s - detect abort needed\n",
3482 vha->host_no, __func__);
3483 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3484 }
3485 qla2xxx_wake_dpc(vha);
Giridhar Malavali4142b192010-09-03 14:57:03 -07003486 ha->flags.fw_hung = 1;
Santosh Vernekarcdbb0a4f2010-05-28 15:08:25 -07003487 if (ha->flags.mbox_busy) {
Santosh Vernekarcdbb0a4f2010-05-28 15:08:25 -07003488 ha->flags.mbox_int = 1;
3489 DEBUG2(qla_printk(KERN_ERR, ha,
Giridhar Malavali4142b192010-09-03 14:57:03 -07003490 "Due to fw hung, doing premature "
3491 "completion of mbx command\n"));
3492 if (test_bit(MBX_INTR_WAIT,
3493 &ha->mbx_cmd_flags))
3494 complete(&ha->mbx_intr_comp);
Santosh Vernekarcdbb0a4f2010-05-28 15:08:25 -07003495 }
Giridhar Malavalia9083012010-04-12 17:59:55 -07003496 }
Lalit Chandivadeefa786c2010-09-03 14:57:02 -07003497 } else
3498 vha->seconds_since_last_heartbeat = 0;
Giridhar Malavalia9083012010-04-12 17:59:55 -07003499 vha->fw_heartbeat_counter = fw_heartbeat_counter;
3500}
3501
3502/*
3503 * qla82xx_device_state_handler
3504 * Main state handler
3505 *
3506 * Note:
3507 * IDC lock must be held upon entry
3508 *
3509 * Return:
3510 * Success : 0
3511 * Failed : 1
3512 */
3513int
3514qla82xx_device_state_handler(scsi_qla_host_t *vha)
3515{
3516 uint32_t dev_state;
Giridhar Malavalia9083012010-04-12 17:59:55 -07003517 int rval = QLA_SUCCESS;
3518 unsigned long dev_init_timeout;
3519 struct qla_hw_data *ha = vha->hw;
3520
3521 qla82xx_idc_lock(ha);
3522 if (!vha->flags.init_done)
3523 qla82xx_set_drv_active(vha);
3524
Giridhar Malavalif1af6202010-05-04 15:01:34 -07003525 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3526 qla_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
3527 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
Giridhar Malavalia9083012010-04-12 17:59:55 -07003528
3529 /* wait for 30 seconds for device to go ready */
3530 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
3531
3532 while (1) {
3533
3534 if (time_after_eq(jiffies, dev_init_timeout)) {
3535 DEBUG(qla_printk(KERN_INFO, ha,
3536 "%s: device init failed!\n",
3537 QLA2XXX_DRIVER_NAME));
3538 rval = QLA_FUNCTION_FAILED;
3539 break;
3540 }
3541 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
Giridhar Malavalif1af6202010-05-04 15:01:34 -07003542 qla_printk(KERN_INFO, ha,
3543 "2:Device state is 0x%x = %s\n", dev_state,
3544 dev_state < MAX_STATES ?
3545 qdev_state[dev_state] : "Unknown");
3546
Giridhar Malavalia9083012010-04-12 17:59:55 -07003547 switch (dev_state) {
3548 case QLA82XX_DEV_READY:
3549 goto exit;
3550 case QLA82XX_DEV_COLD:
3551 rval = qla82xx_device_bootstrap(vha);
3552 goto exit;
3553 case QLA82XX_DEV_INITIALIZING:
3554 qla82xx_idc_unlock(ha);
3555 msleep(1000);
3556 qla82xx_idc_lock(ha);
3557 break;
3558 case QLA82XX_DEV_NEED_RESET:
Giridhar Malavali0ce87912010-12-21 16:00:25 -08003559 qla82xx_need_reset_handler(vha);
Giridhar Malavalia9083012010-04-12 17:59:55 -07003560 break;
3561 case QLA82XX_DEV_NEED_QUIESCENT:
Saurav Kashyap579d12b2010-12-21 16:00:14 -08003562 qla82xx_need_qsnt_handler(vha);
3563 /* Reset timeout value after quiescence handler */
3564 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout\
3565 * HZ);
3566 break;
Giridhar Malavalia9083012010-04-12 17:59:55 -07003567 case QLA82XX_DEV_QUIESCENT:
Saurav Kashyap579d12b2010-12-21 16:00:14 -08003568 /* The quiescence owner exits here; the others wait for the state
3569 * to change
3570 */
3571 if (ha->flags.quiesce_owner)
3572 goto exit;
3573
Giridhar Malavalia9083012010-04-12 17:59:55 -07003574 qla82xx_idc_unlock(ha);
3575 msleep(1000);
3576 qla82xx_idc_lock(ha);
Saurav Kashyap579d12b2010-12-21 16:00:14 -08003577
3578 /* Reset timeout value after quiescence handler */
3579 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout\
3580 * HZ);
Giridhar Malavalia9083012010-04-12 17:59:55 -07003581 break;
3582 case QLA82XX_DEV_FAILED:
3583 qla82xx_dev_failed_handler(vha);
3584 rval = QLA_FUNCTION_FAILED;
3585 goto exit;
3586 default:
3587 qla82xx_idc_unlock(ha);
3588 msleep(1000);
3589 qla82xx_idc_lock(ha);
3590 }
3591 }
3592exit:
3593 qla82xx_idc_unlock(ha);
3594 return rval;
3595}
3596
void qla82xx_watchdog(scsi_qla_host_t *vha)
{
	uint32_t dev_state;
	struct qla_hw_data *ha = vha->hw;

	dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);

	/* don't poll if reset is going on */
	if (!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))) {
		if (dev_state == QLA82XX_DEV_NEED_RESET) {
			qla_printk(KERN_WARNING, ha,
			    "%s(): Adapter reset needed!\n", __func__);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			ha->flags.fw_hung = 1;
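			/*
			 * A mailbox command may be stuck waiting on the hung
			 * firmware; complete it prematurely so the waiter can
			 * make progress and the reset can proceed.
			 */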
			if (ha->flags.mbox_busy) {
				ha->flags.mbox_int = 1;
				DEBUG2(qla_printk(KERN_ERR, ha,
				    "Need reset, doing premature "
				    "completion of mbx command\n"));
				if (test_bit(MBX_INTR_WAIT,
				    &ha->mbx_cmd_flags))
					complete(&ha->mbx_intr_comp);
			}
		} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
		    !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
			DEBUG(qla_printk(KERN_INFO, ha,
			    "scsi(%ld) %s - detected quiescence needed\n",
			    vha->host_no, __func__));
			set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		} else {
			qla82xx_check_fw_alive(vha);
		}
	}
}

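/*
 * qla82xx_load_risc
 *	Bring the ISP82xx device to the READY state via the IDC device
 *	state handler; the srisc_addr argument is not used here.
 *
 * Input:
 *	vha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */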
int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	rval = qla82xx_device_state_handler(vha);
	return rval;
}

/*
 * qla82xx_abort_isp
 *	Resets ISP and aborts all outstanding commands.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qla82xx_abort_isp(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	uint32_t dev_state;

	if (vha->device_flags & DFLG_DEV_FAILED) {
		qla_printk(KERN_WARNING, ha,
		    "%s(%ld): Device in failed state, "
		    "Exiting.\n", __func__, vha->host_no);
		return QLA_SUCCESS;
	}

	qla82xx_idc_lock(ha);
	dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
	if (dev_state == QLA82XX_DEV_READY) {
		qla_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
		qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
		    QLA82XX_DEV_NEED_RESET);
	} else
		qla_printk(KERN_INFO, ha, "HW State: %s\n",
		    dev_state < MAX_STATES ?
		    qdev_state[dev_state] : "Unknown");
	qla82xx_idc_unlock(ha);

	rval = qla82xx_device_state_handler(vha);

	qla82xx_idc_lock(ha);
	qla82xx_clear_rst_ready(ha);
	qla82xx_idc_unlock(ha);

	if (rval == QLA_SUCCESS) {
		ha->flags.fw_hung = 0;
		qla82xx_restart_isp(vha);
	}

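	/*
	 * Recovery failed: retry the ISP abort up to
	 * MAX_RETRIES_OF_ISP_ABORT times before disabling the board.
	 */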
	if (rval) {
		vha->flags.online = 1;
		if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
			if (ha->isp_abort_cnt == 0) {
				qla_printk(KERN_WARNING, ha,
				    "ISP error recovery failed - "
				    "board disabled\n");
				/*
				 * The next call disables the board
				 * completely.
				 */
				ha->isp_ops->reset_adapter(vha);
				vha->flags.online = 0;
				clear_bit(ISP_ABORT_RETRY,
				    &vha->dpc_flags);
				rval = QLA_SUCCESS;
			} else { /* schedule another ISP abort */
				ha->isp_abort_cnt--;
				DEBUG(qla_printk(KERN_INFO, ha,
				    "qla%ld: ISP abort - retry remaining %d\n",
				    vha->host_no, ha->isp_abort_cnt));
				rval = QLA_FUNCTION_FAILED;
			}
		} else {
			ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
			DEBUG(qla_printk(KERN_INFO, ha,
			    "(%ld): ISP error recovery - retrying (%d) "
			    "more times\n", vha->host_no, ha->isp_abort_cnt));
			set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			rval = QLA_FUNCTION_FAILED;
		}
	}
	return rval;
}

/*
 * qla82xx_fcoe_ctx_reset
 *	Performs a quick reset and aborts all outstanding commands.
 *	This only resets the FCoE context and avoids a full-blown
 *	chip reset.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha)
{
	int rval = QLA_FUNCTION_FAILED;

	if (vha->flags.online) {
		/* Abort all outstanding commands so they can be requeued
		 * later.
		 */
		qla2x00_abort_isp_cleanup(vha);
	}

	/* Stop the currently executing firmware.
	 * This destroys the existing FCoE context at the F/W end.
	 */
	qla2x00_try_to_stop_firmware(vha);

	/* Restart. Creates a new FCoE context on INIT_FIRMWARE. */
	rval = qla82xx_restart_isp(vha);

	return rval;
}

/*
 * qla2x00_wait_for_fcoe_ctx_reset
 *	Wait until the FCoE context is reset.
 *
 * Note:
 *	Does context switching here.
 *	Release SPIN_LOCK (if any) before calling this routine.
 *
 * Return:
 *	Success (fcoe_ctx reset is done) : 0
 *	Failed (fcoe_ctx reset not completed within max loop timeout) : 1
 */
int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
{
	int status = QLA_FUNCTION_FAILED;
	unsigned long wait_reset;

	wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while ((test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
	    && time_before(jiffies, wait_reset)) {

		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ);

		if (!test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) &&
		    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
			status = QLA_SUCCESS;
			break;
		}
	}
	DEBUG2(printk(KERN_INFO
	    "%s status=%d\n", __func__, status));

	return status;
}