1/*
2 * Aic94xx SAS/SATA driver sequencer interface.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * Parts of this code adapted from David Chaw's adp94xx_seq.c.
8 *
9 * This file is licensed under GPLv2.
10 *
11 * This file is part of the aic94xx driver.
12 *
13 * The aic94xx driver is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License as
15 * published by the Free Software Foundation; version 2 of the
16 * License.
17 *
18 * The aic94xx driver is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with the aic94xx driver; if not, write to the Free Software
25 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
26 *
27 */
28
29#include <linux/delay.h>
30#include <linux/pci.h>
31#include <linux/firmware.h>
32#include "aic94xx_reg.h"
33#include "aic94xx_hwi.h"
34
35#include "aic94xx_seq.h"
36#include "aic94xx_dump.h"
37
38/* It takes no more than 0.05 us for an instruction
39 * to complete. So waiting for 1 us should be more than
40 * plenty.
41 */
42#define PAUSE_DELAY 1
43#define PAUSE_TRIES 1000
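/* Worst case, each pause/unpause wait loop below spins for roughly
 * PAUSE_TRIES * PAUSE_DELAY = 1 ms.
 */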
44
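/* State parsed out of the sequencer firmware image by
 * asd_request_firmware(): the firmware blob and its version string, the
 * CSEQ/LSEQ interrupt vector tables, the mode 2 task and idle loop entry
 * points, and pointers to (and sizes of) the CSEQ and LSEQ code sections.
 */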
45static const struct firmware *sequencer_fw;
46static const char *sequencer_version;
47static u16 cseq_vecs[CSEQ_NUM_VECS], lseq_vecs[LSEQ_NUM_VECS], mode2_task,
48 cseq_idle_loop, lseq_idle_loop;
49static u8 *cseq_code, *lseq_code;
50static u32 cseq_code_size, lseq_code_size;
51
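/* First and last valid SCB site numbers, computed while walking the SCB
 * sites in asd_init_scb_sites().
 */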
52static u16 first_scb_site_no = 0xFFFF;
53static u16 last_scb_site_no;
54
55/* ---------- Pause/Unpause CSEQ/LSEQ ---------- */
56
57/**
58 * asd_pause_cseq - pause the central sequencer
59 * @asd_ha: pointer to host adapter structure
60 *
61 * Return 0 on success, negative on failure.
62 */
63int asd_pause_cseq(struct asd_ha_struct *asd_ha)
64{
65 int count = PAUSE_TRIES;
66 u32 arp2ctl;
67
68 arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
69 if (arp2ctl & PAUSED)
70 return 0;
71
72 asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl | EPAUSE);
73 do {
74 arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
75 if (arp2ctl & PAUSED)
76 return 0;
77 udelay(PAUSE_DELAY);
78 } while (--count > 0);
79
80 ASD_DPRINTK("couldn't pause CSEQ\n");
81 return -1;
82}
83
84/**
85 * asd_unpause_cseq - unpause the central sequencer.
86 * @asd_ha: pointer to host adapter structure.
87 *
88 * Return 0 on success, negative on error.
89 */
90int asd_unpause_cseq(struct asd_ha_struct *asd_ha)
91{
92 u32 arp2ctl;
93 int count = PAUSE_TRIES;
94
95 arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
96 if (!(arp2ctl & PAUSED))
97 return 0;
98
99 asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl & ~EPAUSE);
100 do {
101 arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
102 if (!(arp2ctl & PAUSED))
103 return 0;
104 udelay(PAUSE_DELAY);
105 } while (--count > 0);
106
107 ASD_DPRINTK("couldn't unpause the CSEQ\n");
108 return -1;
109}
110
111/**
112 * asd_seq_pause_lseq - pause a link sequencer
113 * @asd_ha: pointer to a host adapter structure
114 * @lseq: link sequencer of interest
115 *
116 * Return 0 on success, negative on error.
117 */
118static inline int asd_seq_pause_lseq(struct asd_ha_struct *asd_ha, int lseq)
119{
120 u32 arp2ctl;
121 int count = PAUSE_TRIES;
122
123 arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
124 if (arp2ctl & PAUSED)
125 return 0;
126
127 asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl | EPAUSE);
128 do {
129 arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
130 if (arp2ctl & PAUSED)
131 return 0;
132 udelay(PAUSE_DELAY);
133 } while (--count > 0);
134
135 ASD_DPRINTK("couldn't pause LSEQ %d\n", lseq);
136 return -1;
137}
138
139/**
140 * asd_pause_lseq - pause the link sequencer(s)
141 * @asd_ha: pointer to host adapter structure
142 * @lseq_mask: mask of link sequencers of interest
143 *
144 * Return 0 on success, negative on failure.
145 */
146int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask)
147{
148 int lseq;
149 int err = 0;
150
151 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
152 err = asd_seq_pause_lseq(asd_ha, lseq);
153 if (err)
154 return err;
155 }
156
157 return err;
158}
159
160/**
161 * asd_seq_unpause_lseq - unpause a link sequencer
162 * @asd_ha: pointer to host adapter structure
163 * @lseq: link sequencer of interest
164 *
165 * Return 0 on success, negative on error.
166 */
167static inline int asd_seq_unpause_lseq(struct asd_ha_struct *asd_ha, int lseq)
168{
169 u32 arp2ctl;
170 int count = PAUSE_TRIES;
171
172 arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
173 if (!(arp2ctl & PAUSED))
174 return 0;
175
176 asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl & ~EPAUSE);
177 do {
178 arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
179 if (!(arp2ctl & PAUSED))
180 return 0;
181 udelay(PAUSE_DELAY);
182 } while (--count > 0);
183
184 ASD_DPRINTK("couldn't unpause LSEQ %d\n", lseq);
185 	return -1;
186}
187
188
189/**
190 * asd_unpause_lseq - unpause the link sequencer(s)
191 * @asd_ha: pointer to host adapter structure
192 * @lseq_mask: mask of link sequencers of interest
193 *
194 * Return 0 on success, negative on failure.
195 */
196int asd_unpause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask)
197{
198 int lseq;
199 int err = 0;
200
201 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
202 err = asd_seq_unpause_lseq(asd_ha, lseq);
203 if (err)
204 return err;
205 }
206
207 return err;
208}
209
210/* ---------- Downloading CSEQ/LSEQ microcode ---------- */
211
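/**
 * asd_verify_cseq - verify the CSEQ microcode in sequencer RAM
 * @asd_ha: pointer to host adapter structure
 * @_prog: pointer to the little-endian microcode image that was downloaded
 * @size: size of the microcode in bytes
 *
 * Read the CSEQ instruction RAM back a dword at a time and compare it
 * with the downloaded image.
 *
 * Return 0 if the microcode matches, negative on mismatch.
 */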
212static int asd_verify_cseq(struct asd_ha_struct *asd_ha, const u8 *_prog,
213 u32 size)
214{
215 u32 addr = CSEQ_RAM_REG_BASE_ADR;
216 const u32 *prog = (u32 *) _prog;
217 u32 i;
218
219 for (i = 0; i < size; i += 4, prog++, addr += 4) {
220 u32 val = asd_read_reg_dword(asd_ha, addr);
221
222 if (le32_to_cpu(*prog) != val) {
223 asd_printk("%s: cseq verify failed at %u "
224 "read:0x%x, wanted:0x%x\n",
225 pci_name(asd_ha->pcidev),
226 i, val, le32_to_cpu(*prog));
227 return -1;
228 }
229 }
230 ASD_DPRINTK("verified %d bytes, passed\n", size);
231 return 0;
232}
233
234/**
235 * asd_verify_lseq - verify the microcode of a link sequencer
236 * @asd_ha: pointer to host adapter structure
237 * @_prog: pointer to the microcode
238 * @size: size of the microcode in bytes
239 * @lseq: link sequencer of interest
240 *
241 * The link sequencer code is accessed in 4 KB pages, which are selected
242 * by setting LmRAMPAGE (bits 8 and 9) of the LmBISTCTL1 register.
243 * The 10 KB LSEQm instruction code is mapped, page at a time, at
244 * LmSEQRAM address.
245 */
246static int asd_verify_lseq(struct asd_ha_struct *asd_ha, const u8 *_prog,
247 u32 size, int lseq)
248{
249#define LSEQ_CODEPAGE_SIZE 4096
250 int pages = (size + LSEQ_CODEPAGE_SIZE - 1) / LSEQ_CODEPAGE_SIZE;
251 u32 page;
252 const u32 *prog = (u32 *) _prog;
253
254 for (page = 0; page < pages; page++) {
255 u32 i;
256
257 asd_write_reg_dword(asd_ha, LmBISTCTL1(lseq),
258 page << LmRAMPAGE_LSHIFT);
259 for (i = 0; size > 0 && i < LSEQ_CODEPAGE_SIZE;
260 i += 4, prog++, size-=4) {
261
262 u32 val = asd_read_reg_dword(asd_ha, LmSEQRAM(lseq)+i);
263
264 if (le32_to_cpu(*prog) != val) {
265 asd_printk("%s: LSEQ%d verify failed "
266 "page:%d, offs:%d\n",
267 pci_name(asd_ha->pcidev),
268 lseq, page, i);
269 return -1;
270 }
271 }
272 }
273 ASD_DPRINTK("LSEQ%d verified %d bytes, passed\n", lseq,
274 (int)((u8 *)prog-_prog));
275 return 0;
276}
277
278/**
279 * asd_verify_seq -- verify CSEQ/LSEQ microcode
280 * @asd_ha: pointer to host adapter structure
281 * @prog: pointer to microcode
282 * @size: size of the microcode
283 * @lseq_mask: if 0, verify CSEQ microcode, else mask of LSEQs of interest
284 *
285 * Return 0 if microcode is correct, negative on mismatch.
286 */
287static int asd_verify_seq(struct asd_ha_struct *asd_ha, const u8 *prog,
288 u32 size, u8 lseq_mask)
289{
290 if (lseq_mask == 0)
291 return asd_verify_cseq(asd_ha, prog, size);
292 else {
293 int lseq, err;
294
295 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
296 err = asd_verify_lseq(asd_ha, prog, size, lseq);
297 if (err)
298 return err;
299 }
300 }
301
302 return 0;
303}
304#define ASD_DMA_MODE_DOWNLOAD
305#ifdef ASD_DMA_MODE_DOWNLOAD
306/* This is the size of the CSEQ Mapped instruction page */
307#define MAX_DMA_OVLY_COUNT ((1U << 14)-1)
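/**
 * asd_download_seq - download sequencer microcode using overlay DMA
 * @asd_ha: pointer to host adapter structure
 * @prog: pointer to the microcode image
 * @size: size of the microcode in bytes, must be a multiple of 4
 * @lseq_mask: if 0, download to the CSEQ, else mask of LSEQs to load
 *
 * Pause the sequencers, mask and clear interrupts, then feed the image to
 * the overlay DMA engine in chunks of at most MAX_DMA_OVLY_COUNT bytes,
 * polling OVLYDMACTL until each chunk completes.  The downloaded code is
 * then read back and checked with asd_verify_seq().
 */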
308static int asd_download_seq(struct asd_ha_struct *asd_ha,
309 const u8 * const prog, u32 size, u8 lseq_mask)
310{
311 u32 comstaten;
312 u32 reg;
313 int page;
314 const int pages = (size + MAX_DMA_OVLY_COUNT - 1) / MAX_DMA_OVLY_COUNT;
315 struct asd_dma_tok *token;
316 int err = 0;
317
318 if (size % 4) {
319 asd_printk("sequencer program not multiple of 4\n");
320 return -1;
321 }
322
323 asd_pause_cseq(asd_ha);
324 asd_pause_lseq(asd_ha, 0xFF);
325
326 /* save, disable and clear interrupts */
327 comstaten = asd_read_reg_dword(asd_ha, COMSTATEN);
328 asd_write_reg_dword(asd_ha, COMSTATEN, 0);
329 asd_write_reg_dword(asd_ha, COMSTAT, COMSTAT_MASK);
330
331 asd_write_reg_dword(asd_ha, CHIMINTEN, RST_CHIMINTEN);
332 asd_write_reg_dword(asd_ha, CHIMINT, CHIMINT_MASK);
333
334 token = asd_alloc_coherent(asd_ha, MAX_DMA_OVLY_COUNT, GFP_KERNEL);
335 if (!token) {
336 asd_printk("out of memory for dma SEQ download\n");
337 err = -ENOMEM;
338 goto out;
339 }
340 ASD_DPRINTK("dma-ing %d bytes\n", size);
341
342 for (page = 0; page < pages; page++) {
343 int i;
344 u32 left = min(size-page*MAX_DMA_OVLY_COUNT,
345 (u32)MAX_DMA_OVLY_COUNT);
346
347 memcpy(token->vaddr, prog + page*MAX_DMA_OVLY_COUNT, left);
348 asd_write_reg_addr(asd_ha, OVLYDMAADR, token->dma_handle);
349 asd_write_reg_dword(asd_ha, OVLYDMACNT, left);
350 reg = !page ? RESETOVLYDMA : 0;
351 reg |= (STARTOVLYDMA | OVLYHALTERR);
352 reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ);
353 /* Start DMA. */
354 asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);
355
356 for (i = PAUSE_TRIES*100; i > 0; i--) {
357 u32 dmadone = asd_read_reg_dword(asd_ha, OVLYDMACTL);
358 if (!(dmadone & OVLYDMAACT))
359 break;
360 udelay(PAUSE_DELAY);
361 }
362 }
363
364 reg = asd_read_reg_dword(asd_ha, COMSTAT);
365 if (!(reg & OVLYDMADONE) || (reg & OVLYERR)
366 || (asd_read_reg_dword(asd_ha, CHIMINT) & DEVEXCEPT_MASK)){
367 asd_printk("%s: error DMA-ing sequencer code\n",
368 pci_name(asd_ha->pcidev));
369 err = -ENODEV;
370 }
371
372 asd_free_coherent(asd_ha, token);
373 out:
374 asd_write_reg_dword(asd_ha, COMSTATEN, comstaten);
375
376 return err ? : asd_verify_seq(asd_ha, prog, size, lseq_mask);
377}
378#else /* ASD_DMA_MODE_DOWNLOAD */
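/* Alternative PIO download path, used when ASD_DMA_MODE_DOWNLOAD is not
 * defined: the image is written a dword at a time through SPIODATA with
 * PIOCMODE set in OVLYDMACTL, then verified with asd_verify_seq().
 */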
379static int asd_download_seq(struct asd_ha_struct *asd_ha, const u8 *_prog,
380 u32 size, u8 lseq_mask)
381{
382 int i;
383 u32 reg = 0;
384 const u32 *prog = (u32 *) _prog;
385
386 if (size % 4) {
387 asd_printk("sequencer program not multiple of 4\n");
388 return -1;
389 }
390
391 asd_pause_cseq(asd_ha);
392 asd_pause_lseq(asd_ha, 0xFF);
393
394 reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ);
395 reg |= PIOCMODE;
396
397 asd_write_reg_dword(asd_ha, OVLYDMACNT, size);
398 asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);
399
400 ASD_DPRINTK("downloading %s sequencer%s in PIO mode...\n",
401 lseq_mask ? "LSEQ" : "CSEQ", lseq_mask ? "s" : "");
402
403 for (i = 0; i < size; i += 4, prog++)
404 asd_write_reg_dword(asd_ha, SPIODATA, *prog);
405
406 reg = (reg & ~PIOCMODE) | OVLYHALTERR;
407 asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);
408
409 return asd_verify_seq(asd_ha, _prog, size, lseq_mask);
410}
411#endif /* ASD_DMA_MODE_DOWNLOAD */
412
413/**
414 * asd_seq_download_seqs - download the sequencer microcode
415 * @asd_ha: pointer to host adapter structure
416 *
417 * Download the central and link sequencer microcode.
418 */
419static int asd_seq_download_seqs(struct asd_ha_struct *asd_ha)
420{
421 int err;
422
423 if (!asd_ha->hw_prof.enabled_phys) {
424 asd_printk("%s: no enabled phys!\n", pci_name(asd_ha->pcidev));
425 return -ENODEV;
426 }
427
428 /* Download the CSEQ */
429 ASD_DPRINTK("downloading CSEQ...\n");
430 err = asd_download_seq(asd_ha, cseq_code, cseq_code_size, 0);
431 if (err) {
432 asd_printk("CSEQ download failed:%d\n", err);
433 return err;
434 }
435
436 	/* Download the link sequencers' code.  All of the link sequencers'
437 	 * microcode can be downloaded at the same time.
438 */
439 ASD_DPRINTK("downloading LSEQs...\n");
440 err = asd_download_seq(asd_ha, lseq_code, lseq_code_size,
441 asd_ha->hw_prof.enabled_phys);
442 if (err) {
443 /* Try it one at a time */
444 u8 lseq;
445 u8 lseq_mask = asd_ha->hw_prof.enabled_phys;
446
447 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
448 err = asd_download_seq(asd_ha, lseq_code,
449 lseq_code_size, 1<<lseq);
450 if (err)
451 break;
452 }
453 }
454 if (err)
455 asd_printk("LSEQs download failed:%d\n", err);
456
457 return err;
458}
459
460/* ---------- Initializing the chip, chip memory, etc. ---------- */
461
462/**
463 * asd_init_cseq_mip - initialize CSEQ mode independent pages 4-7
464 * @asd_ha: pointer to host adapter structure
465 */
466static void asd_init_cseq_mip(struct asd_ha_struct *asd_ha)
467{
468 /* CSEQ Mode Independent, page 4 setup. */
469 asd_write_reg_word(asd_ha, CSEQ_Q_EXE_HEAD, 0xFFFF);
470 asd_write_reg_word(asd_ha, CSEQ_Q_EXE_TAIL, 0xFFFF);
471 asd_write_reg_word(asd_ha, CSEQ_Q_DONE_HEAD, 0xFFFF);
472 asd_write_reg_word(asd_ha, CSEQ_Q_DONE_TAIL, 0xFFFF);
473 asd_write_reg_word(asd_ha, CSEQ_Q_SEND_HEAD, 0xFFFF);
474 asd_write_reg_word(asd_ha, CSEQ_Q_SEND_TAIL, 0xFFFF);
475 asd_write_reg_word(asd_ha, CSEQ_Q_DMA2CHIM_HEAD, 0xFFFF);
476 asd_write_reg_word(asd_ha, CSEQ_Q_DMA2CHIM_TAIL, 0xFFFF);
477 asd_write_reg_word(asd_ha, CSEQ_Q_COPY_HEAD, 0xFFFF);
478 asd_write_reg_word(asd_ha, CSEQ_Q_COPY_TAIL, 0xFFFF);
479 asd_write_reg_word(asd_ha, CSEQ_REG0, 0);
480 asd_write_reg_word(asd_ha, CSEQ_REG1, 0);
481 asd_write_reg_dword(asd_ha, CSEQ_REG2, 0);
482 asd_write_reg_byte(asd_ha, CSEQ_LINK_CTL_Q_MAP, 0);
483 {
484 u8 con = asd_read_reg_byte(asd_ha, CCONEXIST);
485 u8 val = hweight8(con);
486 asd_write_reg_byte(asd_ha, CSEQ_MAX_CSEQ_MODE, (val<<4)|val);
487 }
488 asd_write_reg_word(asd_ha, CSEQ_FREE_LIST_HACK_COUNT, 0);
489
490 /* CSEQ Mode independent, page 5 setup. */
491 asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_QUEUE, 0);
492 asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_QUEUE+4, 0);
493 asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_COUNT, 0);
494 asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_COUNT+4, 0);
495 asd_write_reg_word(asd_ha, CSEQ_Q_EST_NEXUS_HEAD, 0xFFFF);
496 asd_write_reg_word(asd_ha, CSEQ_Q_EST_NEXUS_TAIL, 0xFFFF);
497 asd_write_reg_word(asd_ha, CSEQ_NEED_EST_NEXUS_SCB, 0);
498 asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_REQ_HEAD, 0);
499 asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_REQ_TAIL, 0);
500 asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_SCB_OFFSET, 0);
501
502 /* CSEQ Mode independent, page 6 setup. */
503 asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_RET_ADDR0, 0);
504 asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_RET_ADDR1, 0);
505 asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_SCBPTR, 0);
506 asd_write_reg_byte(asd_ha, CSEQ_INT_ROUT_MODE, 0);
507 asd_write_reg_byte(asd_ha, CSEQ_ISR_SCRATCH_FLAGS, 0);
508 asd_write_reg_word(asd_ha, CSEQ_ISR_SAVE_SINDEX, 0);
509 asd_write_reg_word(asd_ha, CSEQ_ISR_SAVE_DINDEX, 0);
510 asd_write_reg_word(asd_ha, CSEQ_Q_MONIRTT_HEAD, 0xFFFF);
511 asd_write_reg_word(asd_ha, CSEQ_Q_MONIRTT_TAIL, 0xFFFF);
512 /* Calculate the free scb mask. */
513 {
514 u16 cmdctx = asd_get_cmdctx_size(asd_ha);
515 cmdctx = (~((cmdctx/128)-1)) >> 8;
516 asd_write_reg_byte(asd_ha, CSEQ_FREE_SCB_MASK, (u8)cmdctx);
517 }
518 asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_HEAD,
519 first_scb_site_no);
520 asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_TAIL,
521 last_scb_site_no);
522 asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_HEAD, 0xFFFF);
523 asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_TAIL, 0xFFFF);
524
525 /* CSEQ Mode independent, page 7 setup. */
526 asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE, 0);
527 asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE+4, 0);
528 asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT, 0);
529 asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT+4, 0);
530 asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_HEAD, 0xFFFF);
531 asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_TAIL, 0xFFFF);
532 asd_write_reg_word(asd_ha, CSEQ_NEED_EMPTY_SCB, 0);
533 asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_HEAD, 0);
534 asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_TAIL, 0);
535 asd_write_reg_byte(asd_ha, CSEQ_EMPTY_SCB_OFFSET, 0);
536 asd_write_reg_word(asd_ha, CSEQ_PRIMITIVE_DATA, 0);
537 asd_write_reg_dword(asd_ha, CSEQ_TIMEOUT_CONST, 0);
538}
539
540/**
541 * asd_init_cseq_mdp - initialize CSEQ Mode dependent pages
542 * @asd_ha: pointer to host adapter structure
543 */
544static void asd_init_cseq_mdp(struct asd_ha_struct *asd_ha)
545{
546 int i;
547 int moffs;
548
549 moffs = CSEQ_PAGE_SIZE * 2;
550
551 /* CSEQ Mode dependent, modes 0-7, page 0 setup. */
552 for (i = 0; i < 8; i++) {
553 asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SINDEX, 0);
554 asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCBPTR, 0);
555 asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_HEAD, 0xFFFF);
556 asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_TAIL, 0xFFFF);
557 asd_write_reg_byte(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCRPAGE, 0);
558 }
559
560 /* CSEQ Mode dependent, mode 0-7, page 1 and 2 shall be ignored. */
561
562 /* CSEQ Mode dependent, mode 8, page 0 setup. */
563 asd_write_reg_word(asd_ha, CSEQ_RET_ADDR, 0xFFFF);
564 asd_write_reg_word(asd_ha, CSEQ_RET_SCBPTR, 0);
565 asd_write_reg_word(asd_ha, CSEQ_SAVE_SCBPTR, 0);
566 asd_write_reg_word(asd_ha, CSEQ_EMPTY_TRANS_CTX, 0);
567 asd_write_reg_word(asd_ha, CSEQ_RESP_LEN, 0);
568 asd_write_reg_word(asd_ha, CSEQ_TMF_SCBPTR, 0);
569 asd_write_reg_word(asd_ha, CSEQ_GLOBAL_PREV_SCB, 0);
570 asd_write_reg_word(asd_ha, CSEQ_GLOBAL_HEAD, 0);
571 asd_write_reg_word(asd_ha, CSEQ_CLEAR_LU_HEAD, 0);
572 asd_write_reg_byte(asd_ha, CSEQ_TMF_OPCODE, 0);
573 asd_write_reg_byte(asd_ha, CSEQ_SCRATCH_FLAGS, 0);
574 asd_write_reg_word(asd_ha, CSEQ_HSB_SITE, 0);
575 asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_SCB_SITE,
576 (u16)last_scb_site_no+1);
577 asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_DDB_SITE,
578 (u16)asd_ha->hw_prof.max_ddbs);
579
580 /* CSEQ Mode dependent, mode 8, page 1 setup. */
581 asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR, 0);
582 asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR + 4, 0);
583 asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK, 0);
584 asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK + 4, 0);
585
586 /* CSEQ Mode dependent, mode 8, page 2 setup. */
587 /* Tell the sequencer the bus address of the first SCB. */
588 asd_write_reg_addr(asd_ha, CSEQ_HQ_NEW_POINTER,
589 asd_ha->seq.next_scb.dma_handle);
590 ASD_DPRINTK("First SCB dma_handle: 0x%llx\n",
591 (unsigned long long)asd_ha->seq.next_scb.dma_handle);
592
593 /* Tell the sequencer the first Done List entry address. */
594 asd_write_reg_addr(asd_ha, CSEQ_HQ_DONE_BASE,
595 asd_ha->seq.actual_dl->dma_handle);
596
597 /* Initialize the Q_DONE_POINTER with the least significant
598 * 4 bytes of the first Done List address. */
599 asd_write_reg_dword(asd_ha, CSEQ_HQ_DONE_POINTER,
600 ASD_BUSADDR_LO(asd_ha->seq.actual_dl->dma_handle));
601
602 asd_write_reg_byte(asd_ha, CSEQ_HQ_DONE_PASS, ASD_DEF_DL_TOGGLE);
603
604 /* CSEQ Mode dependent, mode 8, page 3 shall be ignored. */
605}
606
607/**
608 * asd_init_cseq_scratch -- setup and init CSEQ
609 * @asd_ha: pointer to host adapter structure
610 *
611 * Setup and initialize the central sequencer.  Initialize the mode
612 * independent and dependent scratch pages to the default settings.
613 */
614static void asd_init_cseq_scratch(struct asd_ha_struct *asd_ha)
615{
616 asd_init_cseq_mip(asd_ha);
617 asd_init_cseq_mdp(asd_ha);
618}
619
620/**
621 * asd_init_lseq_mip -- initialize LSEQ Mode independent pages 0-3
622 * @asd_ha: pointer to host adapter structure
 * @lseq: link sequencer of interest
623 */
624static void asd_init_lseq_mip(struct asd_ha_struct *asd_ha, u8 lseq)
625{
626 int i;
627
628 /* LSEQ Mode independent page 0 setup. */
629 asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_HEAD(lseq), 0xFFFF);
630 asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_TAIL(lseq), 0xFFFF);
631 asd_write_reg_byte(asd_ha, LmSEQ_LINK_NUMBER(lseq), lseq);
632 asd_write_reg_byte(asd_ha, LmSEQ_SCRATCH_FLAGS(lseq),
633 ASD_NOTIFY_ENABLE_SPINUP);
634 asd_write_reg_dword(asd_ha, LmSEQ_CONNECTION_STATE(lseq),0x08000000);
635 asd_write_reg_word(asd_ha, LmSEQ_CONCTL(lseq), 0);
636 asd_write_reg_byte(asd_ha, LmSEQ_CONSTAT(lseq), 0);
637 asd_write_reg_byte(asd_ha, LmSEQ_CONNECTION_MODES(lseq), 0);
638 asd_write_reg_word(asd_ha, LmSEQ_REG1_ISR(lseq), 0);
639 asd_write_reg_word(asd_ha, LmSEQ_REG2_ISR(lseq), 0);
640 asd_write_reg_word(asd_ha, LmSEQ_REG3_ISR(lseq), 0);
641 asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq), 0);
642 asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq)+4, 0);
643
644 /* LSEQ Mode independent page 1 setup. */
645 asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR0(lseq), 0xFFFF);
646 asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR1(lseq), 0xFFFF);
647 asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR2(lseq), 0xFFFF);
648 asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR3(lseq), 0xFFFF);
649 asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE0(lseq), 0);
650 asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE1(lseq), 0);
651 asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE2(lseq), 0);
652 asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE3(lseq), 0);
653 asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_HEAD(lseq), 0);
654 asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_TAIL(lseq), 0);
655 asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_BUF_AVAIL(lseq), 0);
656 asd_write_reg_dword(asd_ha, LmSEQ_TIMEOUT_CONST(lseq), 0);
657 asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_SINDEX(lseq), 0);
658 asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_DINDEX(lseq), 0);
659
660 /* LSEQ Mode Independent page 2 setup. */
661 asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR0(lseq), 0xFFFF);
662 asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR1(lseq), 0xFFFF);
663 asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR2(lseq), 0xFFFF);
664 asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR3(lseq), 0xFFFF);
665 asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD0(lseq), 0);
666 asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD1(lseq), 0);
667 asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD2(lseq), 0);
668 asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD3(lseq), 0);
669 asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_HEAD(lseq), 0);
670 asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_TAIL(lseq), 0);
671 asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_BUFS_AVAIL(lseq), 0);
672 for (i = 0; i < 12; i += 4)
673 asd_write_reg_dword(asd_ha, LmSEQ_ATA_SCR_REGS(lseq) + i, 0);
674
675 /* LSEQ Mode Independent page 3 setup. */
676
677 /* Device present timer timeout */
678 asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TMR_TOUT_CONST(lseq),
679 ASD_DEV_PRESENT_TIMEOUT);
680
681 /* SATA interlock timer disabled */
682 asd_write_reg_dword(asd_ha, LmSEQ_SATA_INTERLOCK_TIMEOUT(lseq),
683 ASD_SATA_INTERLOCK_TIMEOUT);
684
685 /* STP shutdown timer timeout constant, IGNORED by the sequencer,
686 * always 0. */
687 asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMEOUT(lseq),
688 ASD_STP_SHUTDOWN_TIMEOUT);
689
690 asd_write_reg_dword(asd_ha, LmSEQ_SRST_ASSERT_TIMEOUT(lseq),
691 ASD_SRST_ASSERT_TIMEOUT);
692
693 asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMEOUT(lseq),
694 ASD_RCV_FIS_TIMEOUT);
695
696 asd_write_reg_dword(asd_ha, LmSEQ_ONE_MILLISEC_TIMEOUT(lseq),
697 ASD_ONE_MILLISEC_TIMEOUT);
698
699 /* COM_INIT timer */
700 asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(lseq),
701 ASD_TEN_MILLISEC_TIMEOUT);
702
703 asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMEOUT(lseq),
704 ASD_SMP_RCV_TIMEOUT);
705}
706
707/**
708 * asd_init_lseq_mdp -- initialize LSEQ mode dependent pages.
709 * @asd_ha: pointer to host adapter structure
 * @lseq: link sequencer of interest
710 */
711static void asd_init_lseq_mdp(struct asd_ha_struct *asd_ha, int lseq)
712{
713 int i;
714 u32 moffs;
715 u16 ret_addr[] = {
716 0xFFFF, /* mode 0 */
717 0xFFFF, /* mode 1 */
718 mode2_task, /* mode 2 */
719 0,
720 0xFFFF, /* mode 4/5 */
721 0xFFFF, /* mode 4/5 */
722 };
723
724 /*
725 	 * Modes 0, 1, 2 and 4/5 have common fields on page 0 for the
726 	 * first 14 bytes.
727 */
728 for (i = 0; i < 3; i++) {
729 moffs = i * LSEQ_MODE_SCRATCH_SIZE;
730 asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq)+moffs,
731 ret_addr[i]);
732 asd_write_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq)+moffs, 0);
733 asd_write_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq)+moffs, 0);
734 asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq)+moffs,0xFFFF);
735 asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq)+moffs,0xFFFF);
736 asd_write_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq)+moffs,0);
737 asd_write_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq)+moffs,0);
738 }
739 /*
740 * Mode 5 page 0 overlaps the same scratch page with Mode 0 page 3.
741 */
742 asd_write_reg_word(asd_ha,
743 LmSEQ_RET_ADDR(lseq)+LSEQ_MODE5_PAGE0_OFFSET,
744 ret_addr[5]);
745 asd_write_reg_word(asd_ha,
746 LmSEQ_REG0_MODE(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0);
747 asd_write_reg_word(asd_ha,
748 LmSEQ_MODE_FLAGS(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0);
749 asd_write_reg_word(asd_ha,
750 LmSEQ_RET_ADDR2(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF);
751 asd_write_reg_word(asd_ha,
752 LmSEQ_RET_ADDR1(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF);
753 asd_write_reg_byte(asd_ha,
754 LmSEQ_OPCODE_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0);
755 asd_write_reg_word(asd_ha,
756 LmSEQ_DATA_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0);
757
758 /* LSEQ Mode dependent 0, page 0 setup. */
759 asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_DDB_SITE(lseq),
760 (u16)asd_ha->hw_prof.max_ddbs);
761 asd_write_reg_word(asd_ha, LmSEQ_EMPTY_TRANS_CTX(lseq), 0);
762 asd_write_reg_word(asd_ha, LmSEQ_RESP_LEN(lseq), 0);
763 asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_SCB_SITE(lseq),
764 (u16)last_scb_site_no+1);
765 asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq),
766 		    (u16) ((LmM0INTEN_MASK & 0xFFFF0000) >> 16));
767 asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq) + 2,
768 (u16) LmM0INTEN_MASK & 0xFFFF);
769 asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_FRM_LEN(lseq), 0);
770 asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_PROTOCOL(lseq), 0);
771 asd_write_reg_byte(asd_ha, LmSEQ_RESP_STATUS(lseq), 0);
772 asd_write_reg_byte(asd_ha, LmSEQ_LAST_LOADED_SGE(lseq), 0);
773 asd_write_reg_word(asd_ha, LmSEQ_SAVE_SCBPTR(lseq), 0);
774
775 /* LSEQ mode dependent, mode 1, page 0 setup. */
776 asd_write_reg_word(asd_ha, LmSEQ_Q_XMIT_HEAD(lseq), 0xFFFF);
777 asd_write_reg_word(asd_ha, LmSEQ_M1_EMPTY_TRANS_CTX(lseq), 0);
778 asd_write_reg_word(asd_ha, LmSEQ_INI_CONN_TAG(lseq), 0);
779 asd_write_reg_byte(asd_ha, LmSEQ_FAILED_OPEN_STATUS(lseq), 0);
780 asd_write_reg_byte(asd_ha, LmSEQ_XMIT_REQUEST_TYPE(lseq), 0);
781 asd_write_reg_byte(asd_ha, LmSEQ_M1_RESP_STATUS(lseq), 0);
782 asd_write_reg_byte(asd_ha, LmSEQ_M1_LAST_LOADED_SGE(lseq), 0);
783 asd_write_reg_word(asd_ha, LmSEQ_M1_SAVE_SCBPTR(lseq), 0);
784
785 /* LSEQ Mode dependent mode 2, page 0 setup */
786 asd_write_reg_word(asd_ha, LmSEQ_PORT_COUNTER(lseq), 0);
787 asd_write_reg_word(asd_ha, LmSEQ_PM_TABLE_PTR(lseq), 0);
788 asd_write_reg_word(asd_ha, LmSEQ_SATA_INTERLOCK_TMR_SAVE(lseq), 0);
789 asd_write_reg_word(asd_ha, LmSEQ_IP_BITL(lseq), 0);
790 asd_write_reg_word(asd_ha, LmSEQ_COPY_SMP_CONN_TAG(lseq), 0);
791 asd_write_reg_byte(asd_ha, LmSEQ_P0M2_OFFS1AH(lseq), 0);
792
793 /* LSEQ Mode dependent, mode 4/5, page 0 setup. */
794 asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_STATUS(lseq), 0);
795 asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_MODE(lseq), 0);
796 asd_write_reg_word(asd_ha, LmSEQ_Q_LINK_HEAD(lseq), 0xFFFF);
797 asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_ERR(lseq), 0);
798 asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_SIGNALS(lseq), 0);
799 asd_write_reg_byte(asd_ha, LmSEQ_SAS_RESET_MODE(lseq), 0);
800 asd_write_reg_byte(asd_ha, LmSEQ_LINK_RESET_RETRY_COUNT(lseq), 0);
801 asd_write_reg_byte(asd_ha, LmSEQ_NUM_LINK_RESET_RETRIES(lseq), 0);
802 asd_write_reg_word(asd_ha, LmSEQ_OOB_INT_ENABLES(lseq), 0);
803 /*
804 * Set the desired interval between transmissions of the NOTIFY
805 	 * (ENABLE SPINUP) primitive. Must be initialized to val - 1.
806 */
807 asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_TIMEOUT(lseq),
808 ASD_NOTIFY_TIMEOUT - 1);
809 /* No delay for the first NOTIFY to be sent to the attached target. */
810 asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_DOWN_COUNT(lseq),
811 ASD_NOTIFY_DOWN_COUNT);
812
813 /* LSEQ Mode dependent, mode 0 and 1, page 1 setup. */
814 for (i = 0; i < 2; i++) {
815 int j;
816 /* Start from Page 1 of Mode 0 and 1. */
817 moffs = LSEQ_PAGE_SIZE + i*LSEQ_MODE_SCRATCH_SIZE;
818 		/* All the fields of page 1 can be initialized to 0. */
819 for (j = 0; j < LSEQ_PAGE_SIZE; j += 4)
820 asd_write_reg_dword(asd_ha, LmSCRATCH(lseq)+moffs+j,0);
821 }
822
823 /* LSEQ Mode dependent, mode 2, page 1 setup. */
824 asd_write_reg_dword(asd_ha, LmSEQ_INVALID_DWORD_COUNT(lseq), 0);
825 asd_write_reg_dword(asd_ha, LmSEQ_DISPARITY_ERROR_COUNT(lseq), 0);
826 asd_write_reg_dword(asd_ha, LmSEQ_LOSS_OF_SYNC_COUNT(lseq), 0);
827
828 /* LSEQ Mode dependent, mode 4/5, page 1. */
829 for (i = 0; i < LSEQ_PAGE_SIZE; i+=4)
830 asd_write_reg_dword(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq)+i, 0);
831 asd_write_reg_byte(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq), 0xFF);
832 asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq), 0xFF);
833 asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+1,0xFF);
834 asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+2,0xFF);
835 asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq), 0xFF);
836 asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+1, 0xFF);
837 asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+2, 0xFF);
838 asd_write_reg_dword(asd_ha, LmSEQ_DATA_OFFSET(lseq), 0xFFFFFFFF);
839
840 /* LSEQ Mode dependent, mode 0, page 2 setup. */
841 asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMER_TERM_TS(lseq), 0);
842 asd_write_reg_byte(asd_ha, LmSEQ_DEVICE_BITS(lseq), 0);
843 asd_write_reg_word(asd_ha, LmSEQ_SDB_DDB(lseq), 0);
844 asd_write_reg_byte(asd_ha, LmSEQ_SDB_NUM_TAGS(lseq), 0);
845 asd_write_reg_byte(asd_ha, LmSEQ_SDB_CURR_TAG(lseq), 0);
846
847 /* LSEQ Mode Dependent 1, page 2 setup. */
848 asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq), 0);
849 asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq)+4, 0);
850 asd_write_reg_dword(asd_ha, LmSEQ_OPEN_TIMER_TERM_TS(lseq), 0);
851 asd_write_reg_dword(asd_ha, LmSEQ_SRST_AS_TIMER_TERM_TS(lseq), 0);
852 asd_write_reg_dword(asd_ha, LmSEQ_LAST_LOADED_SG_EL(lseq), 0);
853
854 /* LSEQ Mode Dependent 2, page 2 setup. */
855 /* The LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS is IGNORED by the sequencer,
856 * i.e. always 0. */
857 asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS(lseq),0);
858 asd_write_reg_dword(asd_ha, LmSEQ_CLOSE_TIMER_TERM_TS(lseq), 0);
859 asd_write_reg_dword(asd_ha, LmSEQ_BREAK_TIMER_TERM_TS(lseq), 0);
860 asd_write_reg_dword(asd_ha, LmSEQ_DWS_RESET_TIMER_TERM_TS(lseq), 0);
861 asd_write_reg_dword(asd_ha,LmSEQ_SATA_INTERLOCK_TIMER_TERM_TS(lseq),0);
862 asd_write_reg_dword(asd_ha, LmSEQ_MCTL_TIMER_TERM_TS(lseq), 0);
863
864 /* LSEQ Mode Dependent 4/5, page 2 setup. */
865 asd_write_reg_dword(asd_ha, LmSEQ_COMINIT_TIMER_TERM_TS(lseq), 0);
866 asd_write_reg_dword(asd_ha, LmSEQ_RCV_ID_TIMER_TERM_TS(lseq), 0);
867 asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMER_TERM_TS(lseq), 0);
868 asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TIMER_TERM_TS(lseq), 0);
869}
870
871/**
872 * asd_init_lseq_scratch -- setup and init link sequencers
873 * @asd_ha: pointer to host adapter struct
874 */
875static void asd_init_lseq_scratch(struct asd_ha_struct *asd_ha)
876{
877 u8 lseq;
878 u8 lseq_mask;
879
880 lseq_mask = asd_ha->hw_prof.enabled_phys;
881 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
882 asd_init_lseq_mip(asd_ha, lseq);
883 asd_init_lseq_mdp(asd_ha, lseq);
884 }
885}
886
887/**
888 * asd_init_scb_sites -- initialize sequencer SCB sites (memory).
889 * @asd_ha: pointer to host adapter structure
890 *
891 * This should be done before initializing common CSEQ and LSEQ
892 * scratch since those areas depend on some computed values here,
893 * last_scb_site_no, etc.
894 */
895static void asd_init_scb_sites(struct asd_ha_struct *asd_ha)
896{
897 u16 site_no;
898 u16 max_scbs = 0;
899
900 for (site_no = asd_ha->hw_prof.max_scbs-1;
901 site_no != (u16) -1;
902 site_no--) {
903 u16 i;
904
905 /* Initialize all fields in the SCB site to 0. */
906 for (i = 0; i < ASD_SCB_SIZE; i += 4)
907 asd_scbsite_write_dword(asd_ha, site_no, i, 0);
908
909 		/* A workaround needed by the sequencer to fix a SATA issue
910 		 * is to exclude certain SCB sites from the free list. */
911 if (!SCB_SITE_VALID(site_no))
912 continue;
913
914 if (last_scb_site_no == 0)
915 last_scb_site_no = site_no;
916
917 /* For every SCB site, we need to initialize the
918 * following fields: Q_NEXT, SCB_OPCODE, SCB_FLAGS,
919 * and SG Element Flag. */
920
921 /* Q_NEXT field of the last SCB is invalidated. */
922 asd_scbsite_write_word(asd_ha, site_no, 0, first_scb_site_no);
923
924 /* Initialize SCB Site Opcode field to invalid. */
925 asd_scbsite_write_byte(asd_ha, site_no,
926 offsetof(struct scb_header, opcode),
927 0xFF);
928
929 		/* Initialize the SCB Site Flags field to indicate that a
930 		 * response frame has been received, so that inadvertently
931 		 * received frames are dropped. */
932 asd_scbsite_write_byte(asd_ha, site_no, 0x49, 0x01);
933
934 first_scb_site_no = site_no;
935 max_scbs++;
936 }
937 asd_ha->hw_prof.max_scbs = max_scbs;
938 ASD_DPRINTK("max_scbs:%d\n", asd_ha->hw_prof.max_scbs);
939 ASD_DPRINTK("first_scb_site_no:0x%x\n", first_scb_site_no);
940 ASD_DPRINTK("last_scb_site_no:0x%x\n", last_scb_site_no);
941}
942
943/**
944 * asd_init_cseq_cio - initialize CSEQ CIO registers
945 * @asd_ha: pointer to host adapter structure
946 */
947static void asd_init_cseq_cio(struct asd_ha_struct *asd_ha)
948{
949 int i;
950
951 asd_write_reg_byte(asd_ha, CSEQCOMINTEN, 0);
952 asd_write_reg_byte(asd_ha, CSEQDLCTL, ASD_DL_SIZE_BITS);
953 asd_write_reg_byte(asd_ha, CSEQDLOFFS, 0);
954 asd_write_reg_byte(asd_ha, CSEQDLOFFS+1, 0);
955 asd_ha->seq.scbpro = 0;
956 asd_write_reg_dword(asd_ha, SCBPRO, 0);
957 asd_write_reg_dword(asd_ha, CSEQCON, 0);
958
959 	/* Initialize CSEQ Mode 11 Interrupt Vectors.
960 * The addresses are 16 bit wide and in dword units.
961 * The values of their macros are in byte units.
962 * Thus we have to divide by 4. */
963 asd_write_reg_word(asd_ha, CM11INTVEC0, cseq_vecs[0]);
964 asd_write_reg_word(asd_ha, CM11INTVEC1, cseq_vecs[1]);
965 asd_write_reg_word(asd_ha, CM11INTVEC2, cseq_vecs[2]);
966
967 /* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */
968 asd_write_reg_byte(asd_ha, CARP2INTEN, EN_ARP2HALTC);
969
970 /* Initialize CSEQ Scratch Page to 0x04. */
971 asd_write_reg_byte(asd_ha, CSCRATCHPAGE, 0x04);
972
973 /* Initialize CSEQ Mode[0-8] Dependent registers. */
974 /* Initialize Scratch Page to 0. */
975 for (i = 0; i < 9; i++)
976 asd_write_reg_byte(asd_ha, CMnSCRATCHPAGE(i), 0);
977
978 /* Reset the ARP2 Program Count. */
979 asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop);
980
981 for (i = 0; i < 8; i++) {
982 		/* Initialize Mode n Link m Interrupt Enable. */
983 asd_write_reg_dword(asd_ha, CMnINTEN(i), EN_CMnRSPMBXF);
984 /* Initialize Mode n Request Mailbox. */
985 asd_write_reg_dword(asd_ha, CMnREQMBX(i), 0);
986 }
987}
988
989/**
990 * asd_init_lseq_cio -- initialize LmSEQ CIO registers
991 * @asd_ha: pointer to host adapter structure
 * @lseq: link sequencer of interest
992 */
993static void asd_init_lseq_cio(struct asd_ha_struct *asd_ha, int lseq)
994{
995 u8 *sas_addr;
996 int i;
997
998 /* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */
999 asd_write_reg_dword(asd_ha, LmARP2INTEN(lseq), EN_ARP2HALTC);
1000
1001 asd_write_reg_byte(asd_ha, LmSCRATCHPAGE(lseq), 0);
1002
1003 /* Initialize Mode 0,1, and 2 SCRATCHPAGE to 0. */
1004 for (i = 0; i < 3; i++)
1005 asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, i), 0);
1006
1007 /* Initialize Mode 5 SCRATCHPAGE to 0. */
1008 asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, 5), 0);
1009
1010 asd_write_reg_dword(asd_ha, LmRSPMBX(lseq), 0);
1011 /* Initialize Mode 0,1,2 and 5 Interrupt Enable and
1012 * Interrupt registers. */
1013 asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 0), LmM0INTEN_MASK);
1014 asd_write_reg_dword(asd_ha, LmMnINT(lseq, 0), 0xFFFFFFFF);
1015 /* Mode 1 */
1016 asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 1), LmM1INTEN_MASK);
1017 asd_write_reg_dword(asd_ha, LmMnINT(lseq, 1), 0xFFFFFFFF);
1018 /* Mode 2 */
1019 asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 2), LmM2INTEN_MASK);
1020 asd_write_reg_dword(asd_ha, LmMnINT(lseq, 2), 0xFFFFFFFF);
1021 /* Mode 5 */
1022 asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 5), LmM5INTEN_MASK);
1023 asd_write_reg_dword(asd_ha, LmMnINT(lseq, 5), 0xFFFFFFFF);
1024
1025 /* Enable HW Timer status. */
1026 asd_write_reg_byte(asd_ha, LmHWTSTATEN(lseq), LmHWTSTATEN_MASK);
1027
1028 /* Enable Primitive Status 0 and 1. */
1029 asd_write_reg_dword(asd_ha, LmPRIMSTAT0EN(lseq), LmPRIMSTAT0EN_MASK);
1030 asd_write_reg_dword(asd_ha, LmPRIMSTAT1EN(lseq), LmPRIMSTAT1EN_MASK);
1031
1032 /* Enable Frame Error. */
1033 asd_write_reg_dword(asd_ha, LmFRMERREN(lseq), LmFRMERREN_MASK);
1034 asd_write_reg_byte(asd_ha, LmMnHOLDLVL(lseq, 0), 0x50);
1035
1036 /* Initialize Mode 0 Transfer Level to 512. */
1037 asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 0), LmMnXFRLVL_512);
1038 /* Initialize Mode 1 Transfer Level to 256. */
1039 asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 1), LmMnXFRLVL_256);
1040
1041 /* Initialize Program Count. */
1042 asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop);
1043
1044 /* Enable Blind SG Move. */
1045 asd_write_reg_dword(asd_ha, LmMODECTL(lseq), LmBLIND48);
1046 asd_write_reg_word(asd_ha, LmM3SATATIMER(lseq),
1047 ASD_SATA_INTERLOCK_TIMEOUT);
1048
1049 (void) asd_read_reg_dword(asd_ha, LmREQMBX(lseq));
1050
1051 /* Clear Primitive Status 0 and 1. */
1052 asd_write_reg_dword(asd_ha, LmPRMSTAT0(lseq), 0xFFFFFFFF);
1053 asd_write_reg_dword(asd_ha, LmPRMSTAT1(lseq), 0xFFFFFFFF);
1054
1055 /* Clear HW Timer status. */
1056 asd_write_reg_byte(asd_ha, LmHWTSTAT(lseq), 0xFF);
1057
1058 /* Clear DMA Errors for Mode 0 and 1. */
1059 asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 0), 0xFF);
1060 asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 1), 0xFF);
1061
1062 /* Clear SG DMA Errors for Mode 0 and 1. */
1063 asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 0), 0xFF);
1064 asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 1), 0xFF);
1065
1066 /* Clear Mode 0 Buffer Parity Error. */
1067 asd_write_reg_byte(asd_ha, LmMnBUFSTAT(lseq, 0), LmMnBUFPERR);
1068
1069 /* Clear Mode 0 Frame Error register. */
1070 asd_write_reg_dword(asd_ha, LmMnFRMERR(lseq, 0), 0xFFFFFFFF);
1071
1072 /* Reset LSEQ external interrupt arbiter. */
1073 asd_write_reg_byte(asd_ha, LmARP2INTCTL(lseq), RSTINTCTL);
1074
1075 /* Set the Phy SAS for the LmSEQ WWN. */
1076 sas_addr = asd_ha->phys[lseq].phy_desc->sas_addr;
1077 for (i = 0; i < SAS_ADDR_SIZE; i++)
1078 asd_write_reg_byte(asd_ha, LmWWN(lseq) + i, sas_addr[i]);
1079
1080 /* Set the Transmit Size to 1024 bytes, 0 = 256 Dwords. */
1081 asd_write_reg_byte(asd_ha, LmMnXMTSIZE(lseq, 1), 0);
1082
1083 /* Set the Bus Inactivity Time Limit Timer. */
1084 asd_write_reg_word(asd_ha, LmBITL_TIMER(lseq), 9);
1085
1086 /* Enable SATA Port Multiplier. */
1087 asd_write_reg_byte(asd_ha, LmMnSATAFS(lseq, 1), 0x80);
1088
1089 /* Initialize Interrupt Vector[0-10] address in Mode 3.
1090 * See the comment on CSEQ_INT_* */
1091 asd_write_reg_word(asd_ha, LmM3INTVEC0(lseq), lseq_vecs[0]);
1092 asd_write_reg_word(asd_ha, LmM3INTVEC1(lseq), lseq_vecs[1]);
1093 asd_write_reg_word(asd_ha, LmM3INTVEC2(lseq), lseq_vecs[2]);
1094 asd_write_reg_word(asd_ha, LmM3INTVEC3(lseq), lseq_vecs[3]);
1095 asd_write_reg_word(asd_ha, LmM3INTVEC4(lseq), lseq_vecs[4]);
1096 asd_write_reg_word(asd_ha, LmM3INTVEC5(lseq), lseq_vecs[5]);
1097 asd_write_reg_word(asd_ha, LmM3INTVEC6(lseq), lseq_vecs[6]);
1098 asd_write_reg_word(asd_ha, LmM3INTVEC7(lseq), lseq_vecs[7]);
1099 asd_write_reg_word(asd_ha, LmM3INTVEC8(lseq), lseq_vecs[8]);
1100 asd_write_reg_word(asd_ha, LmM3INTVEC9(lseq), lseq_vecs[9]);
1101 asd_write_reg_word(asd_ha, LmM3INTVEC10(lseq), lseq_vecs[10]);
1102 /*
1103 * Program the Link LED control, applicable only for
1104 * Chip Rev. B or later.
1105 */
1106 asd_write_reg_dword(asd_ha, LmCONTROL(lseq),
1107 (LEDTIMER | LEDMODE_TXRX | LEDTIMERS_100ms));
1108
1109 /* Set the Align Rate for SAS and STP mode. */
1110 asd_write_reg_byte(asd_ha, LmM1SASALIGN(lseq), SAS_ALIGN_DEFAULT);
1111 asd_write_reg_byte(asd_ha, LmM1STPALIGN(lseq), STP_ALIGN_DEFAULT);
1112}
1113
1114
1115/**
1116 * asd_post_init_cseq -- clear CSEQ Mode n Int. status and Response mailbox
1117 * @asd_ha: pointer to host adapter struct
1118 */
1119static void asd_post_init_cseq(struct asd_ha_struct *asd_ha)
1120{
1121 int i;
1122
1123 for (i = 0; i < 8; i++)
1124 asd_write_reg_dword(asd_ha, CMnINT(i), 0xFFFFFFFF);
1125 for (i = 0; i < 8; i++)
1126 asd_read_reg_dword(asd_ha, CMnRSPMBX(i));
1127 /* Reset the external interrupt arbiter. */
1128 asd_write_reg_byte(asd_ha, CARP2INTCTL, RSTINTCTL);
1129}
1130
1131/**
1132 * asd_init_ddb_0 -- initialize DDB 0
1133 * @asd_ha: pointer to host adapter structure
1134 *
1135 * Initialize DDB site 0 which is used internally by the sequencer.
1136 */
1137static void asd_init_ddb_0(struct asd_ha_struct *asd_ha)
1138{
1139 int i;
1140
1141 /* Zero out the DDB explicitly */
1142 for (i = 0; i < sizeof(struct asd_ddb_seq_shared); i+=4)
1143 asd_ddbsite_write_dword(asd_ha, 0, i, 0);
1144
1145 asd_ddbsite_write_word(asd_ha, 0,
1146 offsetof(struct asd_ddb_seq_shared, q_free_ddb_head), 0);
1147 asd_ddbsite_write_word(asd_ha, 0,
1148 offsetof(struct asd_ddb_seq_shared, q_free_ddb_tail),
1149 asd_ha->hw_prof.max_ddbs-1);
1150 asd_ddbsite_write_word(asd_ha, 0,
1151 offsetof(struct asd_ddb_seq_shared, q_free_ddb_cnt), 0);
1152 asd_ddbsite_write_word(asd_ha, 0,
1153 offsetof(struct asd_ddb_seq_shared, q_used_ddb_head), 0xFFFF);
1154 asd_ddbsite_write_word(asd_ha, 0,
1155 offsetof(struct asd_ddb_seq_shared, q_used_ddb_tail), 0xFFFF);
1156 asd_ddbsite_write_word(asd_ha, 0,
1157 offsetof(struct asd_ddb_seq_shared, shared_mem_lock), 0);
1158 asd_ddbsite_write_word(asd_ha, 0,
1159 offsetof(struct asd_ddb_seq_shared, smp_conn_tag), 0);
1160 asd_ddbsite_write_word(asd_ha, 0,
1161 offsetof(struct asd_ddb_seq_shared, est_nexus_buf_cnt), 0);
1162 asd_ddbsite_write_word(asd_ha, 0,
1163 offsetof(struct asd_ddb_seq_shared, est_nexus_buf_thresh),
1164 asd_ha->hw_prof.num_phys * 2);
1165 asd_ddbsite_write_byte(asd_ha, 0,
1166 offsetof(struct asd_ddb_seq_shared, settable_max_contexts),0);
1167 asd_ddbsite_write_byte(asd_ha, 0,
1168 offsetof(struct asd_ddb_seq_shared, conn_not_active), 0xFF);
1169 asd_ddbsite_write_byte(asd_ha, 0,
1170 offsetof(struct asd_ddb_seq_shared, phy_is_up), 0x00);
1171 /* DDB 0 is reserved */
1172 set_bit(0, asd_ha->hw_prof.ddb_bitmap);
1173}
1174
1175/**
1176 * asd_seq_setup_seqs -- setup and initialize central and link sequencers
1177 * @asd_ha: pointer to host adapter structure
1178 */
1179static void asd_seq_setup_seqs(struct asd_ha_struct *asd_ha)
1180{
1181 int lseq;
1182 u8 lseq_mask;
1183
1184 /* Initialize SCB sites. Done first to compute some values which
1185 * the rest of the init code depends on. */
1186 asd_init_scb_sites(asd_ha);
1187
1188 /* Initialize CSEQ Scratch RAM registers. */
1189 asd_init_cseq_scratch(asd_ha);
1190
1191 /* Initialize LmSEQ Scratch RAM registers. */
1192 asd_init_lseq_scratch(asd_ha);
1193
1194 /* Initialize CSEQ CIO registers. */
1195 asd_init_cseq_cio(asd_ha);
1196
1197 asd_init_ddb_0(asd_ha);
1198
1199 /* Initialize LmSEQ CIO registers. */
1200 lseq_mask = asd_ha->hw_prof.enabled_phys;
1201 for_each_sequencer(lseq_mask, lseq_mask, lseq)
1202 asd_init_lseq_cio(asd_ha, lseq);
1203 asd_post_init_cseq(asd_ha);
1204}
1205
1206
1207/**
1208 * asd_seq_start_cseq -- start the central sequencer, CSEQ
1209 * @asd_ha: pointer to host adapter structure
1210 */
1211static int asd_seq_start_cseq(struct asd_ha_struct *asd_ha)
1212{
1213 	/* Point the ARP2 program counter at the CSEQ idle loop. */
1214 asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop);
1215
1216 /* Unpause the CSEQ */
1217 return asd_unpause_cseq(asd_ha);
1218}
1219
1220/**
1221 * asd_seq_start_lseq -- start a link sequencer
1222 * @asd_ha: pointer to host adapter structure
1223 * @lseq: the link sequencer of interest
1224 */
1225static int asd_seq_start_lseq(struct asd_ha_struct *asd_ha, int lseq)
1226{
1227 	/* Point the ARP2 program counter at the LSEQ idle loop. */
1228 asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop);
1229
1230 /* Unpause the LmSEQ */
1231 return asd_seq_unpause_lseq(asd_ha, lseq);
1232}
1233
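/**
 * asd_request_firmware - load and parse the sequencer firmware image
 * @asd_ha: pointer to host adapter structure
 *
 * Request SAS_RAZOR_SEQUENCER_FW_FILE from userspace (only once; later
 * calls return immediately), verify its checksum and vector table sizes,
 * and cache the interrupt vectors, idle loop addresses and pointers to
 * the CSEQ/LSEQ code sections in the file-scope variables at the top of
 * this file.
 */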
1234static int asd_request_firmware(struct asd_ha_struct *asd_ha)
1235{
1236 int err, i;
1237 struct sequencer_file_header header, *hdr_ptr;
1238 u32 csum = 0;
1239 u16 *ptr_cseq_vecs, *ptr_lseq_vecs;
1240
1241 if (sequencer_fw)
1242 /* already loaded */
1243 return 0;
1244
1245 err = request_firmware(&sequencer_fw,
1246 SAS_RAZOR_SEQUENCER_FW_FILE,
1247 &asd_ha->pcidev->dev);
1248 if (err)
1249 return err;
1250
1251 hdr_ptr = (struct sequencer_file_header *)sequencer_fw->data;
1252
1253 header.csum = le32_to_cpu(hdr_ptr->csum);
1254 header.major = le32_to_cpu(hdr_ptr->major);
1255 header.minor = le32_to_cpu(hdr_ptr->minor);
1256 sequencer_version = hdr_ptr->version;
1257 header.cseq_table_offset = le32_to_cpu(hdr_ptr->cseq_table_offset);
1258 header.cseq_table_size = le32_to_cpu(hdr_ptr->cseq_table_size);
1259 header.lseq_table_offset = le32_to_cpu(hdr_ptr->lseq_table_offset);
1260 header.lseq_table_size = le32_to_cpu(hdr_ptr->lseq_table_size);
1261 header.cseq_code_offset = le32_to_cpu(hdr_ptr->cseq_code_offset);
1262 header.cseq_code_size = le32_to_cpu(hdr_ptr->cseq_code_size);
1263 header.lseq_code_offset = le32_to_cpu(hdr_ptr->lseq_code_offset);
1264 header.lseq_code_size = le32_to_cpu(hdr_ptr->lseq_code_size);
1265 header.mode2_task = le16_to_cpu(hdr_ptr->mode2_task);
1266 header.cseq_idle_loop = le16_to_cpu(hdr_ptr->cseq_idle_loop);
1267 header.lseq_idle_loop = le16_to_cpu(hdr_ptr->lseq_idle_loop);
1268
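	/* The firmware checksum is a simple byte-wise sum over the image,
	 * excluding the csum field itself.
	 */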
1269 for (i = sizeof(header.csum); i < sequencer_fw->size; i++)
1270 csum += sequencer_fw->data[i];
1271
1272 if (csum != header.csum) {
1273 asd_printk("Firmware file checksum mismatch\n");
1274 return -EINVAL;
1275 }
1276
1277 if (header.cseq_table_size != CSEQ_NUM_VECS ||
1278 header.lseq_table_size != LSEQ_NUM_VECS) {
1279 asd_printk("Firmware file table size mismatch\n");
1280 return -EINVAL;
1281 }
1282
1283 ptr_cseq_vecs = (u16 *)&sequencer_fw->data[header.cseq_table_offset];
1284 ptr_lseq_vecs = (u16 *)&sequencer_fw->data[header.lseq_table_offset];
1285 mode2_task = header.mode2_task;
1286 cseq_idle_loop = header.cseq_idle_loop;
1287 lseq_idle_loop = header.lseq_idle_loop;
1288
1289 for (i = 0; i < CSEQ_NUM_VECS; i++)
1290 cseq_vecs[i] = le16_to_cpu(ptr_cseq_vecs[i]);
1291
1292 for (i = 0; i < LSEQ_NUM_VECS; i++)
1293 lseq_vecs[i] = le16_to_cpu(ptr_lseq_vecs[i]);
1294
1295 cseq_code = &sequencer_fw->data[header.cseq_code_offset];
1296 cseq_code_size = header.cseq_code_size;
1297 lseq_code = &sequencer_fw->data[header.lseq_code_offset];
1298 lseq_code_size = header.lseq_code_size;
1299
1300 return 0;
1301}
1302
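/**
 * asd_init_seqs - load, download and initialize the sequencers
 * @asd_ha: pointer to host adapter structure
 *
 * Load the sequencer firmware, download it to the CSEQ and the enabled
 * LSEQs, then initialize the SCB sites, scratch RAM and CIO registers.
 */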
1303int asd_init_seqs(struct asd_ha_struct *asd_ha)
1304{
1305 int err;
1306
1307 err = asd_request_firmware(asd_ha);
1308
1309 if (err) {
1310 asd_printk("Failed to load sequencer firmware file %s, error %d\n",
1311 SAS_RAZOR_SEQUENCER_FW_FILE, err);
1312 return err;
1313 }
1314
1315 asd_printk("using sequencer %s\n", sequencer_version);
1316 err = asd_seq_download_seqs(asd_ha);
1317 if (err) {
1318 asd_printk("couldn't download sequencers for %s\n",
1319 pci_name(asd_ha->pcidev));
1320 return err;
1321 }
1322
1323 asd_seq_setup_seqs(asd_ha);
1324
1325 return 0;
1326}
1327
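/**
 * asd_start_seqs - start the central and enabled link sequencers
 * @asd_ha: pointer to host adapter structure
 *
 * Point each sequencer at its idle loop and unpause it.
 */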
1328int asd_start_seqs(struct asd_ha_struct *asd_ha)
1329{
1330 int err;
1331 u8 lseq_mask;
1332 int lseq;
1333
1334 err = asd_seq_start_cseq(asd_ha);
1335 if (err) {
1336 asd_printk("couldn't start CSEQ for %s\n",
1337 pci_name(asd_ha->pcidev));
1338 return err;
1339 }
1340
1341 lseq_mask = asd_ha->hw_prof.enabled_phys;
1342 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
1343 err = asd_seq_start_lseq(asd_ha, lseq);
1344 if (err) {
1345 asd_printk("coudln't start LSEQ %d for %s\n", lseq,
1346 pci_name(asd_ha->pcidev));
1347 return err;
1348 }
1349 }
1350
1351 return 0;
1352}
1353
1354/**
1355 * asd_update_port_links -- update port_map_by_links and phy_is_up
1356 * @sas_phy: pointer to the phy which has been added to a port
1357 *
1358 * 1) When a link reset has completed and we got BYTES DMAED with a
1359 * valid frame we call this function for that phy, to indicate that
1360 * the phy is up, i.e. we update the phy_is_up in DDB 0. The
1361 * sequencer checks phy_is_up when pending SCBs are to be sent, and
1362 * when an open address frame has been received.
1363 *
1364 * 2) When we know of ports, we call this function to update the map
1365 * of phys participating in that port, i.e. we update the
1366 * port_map_by_links in DDB 0. When a HARD_RESET primitive has been
1367 * received, the sequencer disables all phys in that port.
1368 * port_map_by_links is also used as the conn_mask byte in the
1369 * initiator/target port DDB.
1370 */
1371void asd_update_port_links(struct asd_sas_phy *sas_phy)
1372{
1373 struct asd_ha_struct *asd_ha = sas_phy->ha->lldd_ha;
1374 const u8 phy_mask = (u8) sas_phy->port->phy_mask;
1375 u8 phy_is_up;
1376 u8 mask;
1377 int i, err;
1378
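	/* Record the port's phy mask in port_map_by_links for every link in
	 * the port, then mark these phys as up in DDB 0.
	 * asd_ddbsite_update_byte() takes the old and new byte values and
	 * appears to fail if the byte changed underneath us, so retry a
	 * bounded number of times; -EFAULT indicates a parity error.
	 */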
1379 for_each_phy(phy_mask, mask, i)
1380 asd_ddbsite_write_byte(asd_ha, 0,
1381 offsetof(struct asd_ddb_seq_shared,
1382 port_map_by_links)+i,phy_mask);
1383
1384 for (i = 0; i < 12; i++) {
1385 phy_is_up = asd_ddbsite_read_byte(asd_ha, 0,
1386 offsetof(struct asd_ddb_seq_shared, phy_is_up));
1387 err = asd_ddbsite_update_byte(asd_ha, 0,
1388 offsetof(struct asd_ddb_seq_shared, phy_is_up),
1389 phy_is_up,
1390 phy_is_up | phy_mask);
1391 if (!err)
1392 break;
1393 else if (err == -EFAULT) {
1394 asd_printk("phy_is_up: parity error in DDB 0\n");
1395 break;
1396 }
1397 }
1398
1399 if (err)
1400 asd_printk("couldn't update DDB 0:error:%d\n", err);
1401}