/* esp_scsi.c: ESP SCSI driver.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/irqreturn.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_transport_spi.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"2.000"
#define DRV_MODULE_RELDATE	"April 19, 2007"

/* SCSI bus reset settle time in seconds. */
static int esp_bus_reset_settle = 3;

static u32 esp_debug;
#define ESP_DEBUG_INTR		0x00000001
#define ESP_DEBUG_SCSICMD	0x00000002
#define ESP_DEBUG_RESET		0x00000004
#define ESP_DEBUG_MSGIN		0x00000008
#define ESP_DEBUG_MSGOUT	0x00000010
#define ESP_DEBUG_CMDDONE	0x00000020
#define ESP_DEBUG_DISCONNECT	0x00000040
#define ESP_DEBUG_DATASTART	0x00000080
#define ESP_DEBUG_DATADONE	0x00000100
#define ESP_DEBUG_RECONNECT	0x00000200
#define ESP_DEBUG_AUTOSENSE	0x00000400

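/* Each ESP_DEBUG_* bit above gates one esp_log_*() macro below, so the
 * debug categories can be enabled independently by OR-ing flags into
 * esp_debug.  For example (an illustrative setting, not a default):
 *
 *	esp_debug = ESP_DEBUG_INTR | ESP_DEBUG_RESET;
 *
 * would enable only interrupt and reset tracing.
 */
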
#define esp_log_intr(f, a...) \
do {	if (esp_debug & ESP_DEBUG_INTR) \
		printk(f, ## a); \
} while (0)

#define esp_log_reset(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RESET) \
		printk(f, ## a); \
} while (0)

#define esp_log_msgin(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGIN) \
		printk(f, ## a); \
} while (0)

#define esp_log_msgout(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGOUT) \
		printk(f, ## a); \
} while (0)

#define esp_log_cmddone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_CMDDONE) \
		printk(f, ## a); \
} while (0)

#define esp_log_disconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DISCONNECT) \
		printk(f, ## a); \
} while (0)

#define esp_log_datastart(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATASTART) \
		printk(f, ## a); \
} while (0)

#define esp_log_datadone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATADONE) \
		printk(f, ## a); \
} while (0)

#define esp_log_reconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RECONNECT) \
		printk(f, ## a); \
} while (0)

#define esp_log_autosense(f, a...) \
do {	if (esp_debug & ESP_DEBUG_AUTOSENSE) \
		printk(f, ## a); \
} while (0)

#define esp_read8(REG)		esp->ops->esp_read8(esp, REG)
#define esp_write8(VAL,REG)	esp->ops->esp_write8(esp, VAL, REG)

static void esp_log_fill_regs(struct esp *esp,
			      struct esp_event_ent *p)
{
	p->sreg = esp->sreg;
	p->seqreg = esp->seqreg;
	p->sreg2 = esp->sreg2;
	p->ireg = esp->ireg;
	p->select_state = esp->select_state;
	p->event = esp->event;
}

void scsi_esp_cmd(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_CMD;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp_write8(val, ESP_CMD);
}
EXPORT_SYMBOL(scsi_esp_cmd);

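/* scsi_esp_cmd() above and esp_event() below share a small ring buffer
 * of esp_event_ent records.  The index wraps with a bitwise AND, which
 * is only correct because ESP_EVENT_LOG_SZ is a power of two; the log
 * is dumped by esp_dump_cmd_log() when an illegal-command interrupt is
 * seen.
 */
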
static void esp_event(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_EVENT;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp->event = val;
}

static void esp_dump_cmd_log(struct esp *esp)
{
	int idx = esp->esp_event_cur;
	int stop = idx;

	printk(KERN_INFO PFX "esp%d: Dumping command log\n",
	       esp->host->unique_id);
	do {
		struct esp_event_ent *p = &esp->esp_event_log[idx];

		printk(KERN_INFO PFX "esp%d: ent[%d] %s ",
		       esp->host->unique_id, idx,
		       p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT");

		printk("val[%02x] sreg[%02x] seqreg[%02x] "
		       "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
		       p->val, p->sreg, p->seqreg,
		       p->sreg2, p->ireg, p->select_state, p->event);

		idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
	} while (idx != stop);
}

static void esp_flush_fifo(struct esp *esp)
{
	scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	if (esp->rev == ESP236) {
		int lim = 1000;

		while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: ESP_FF_FBYTES "
				       "will not clear!\n",
				       esp->host->unique_id);
				break;
			}
			udelay(1);
		}
	}
}

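/* On FASHME the fifo is 16 bits wide, so hme_read_fifo() below drains
 * it two bytes per FIFO-flags count.  If the chip reports a dangling
 * half-word (ESP_STAT2_F1BYTE), a zero byte is pushed to complete the
 * final 16-bit word before the odd byte is read back out.
 */
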
static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	esp->fifo_cnt = idx;
}

static void esp_set_all_config3(struct esp *esp, u8 val)
{
	int i;

	for (i = 0; i < ESP_MAX_TARGET; i++)
		esp->target[i].esp_config3 = val;
}

/* Reset the ESP chip, _not_ the SCSI bus. */
static void esp_reset_esp(struct esp *esp)
{
	u8 family_code, version;

	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* This is the only point at which it is reliable to read
	 * the ID-code for the fast ESP chip variants.
	 */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		version = esp_read8(ESP_UID);
		family_code = (version & 0xf8) >> 3;
		if (family_code == 0x02)
			esp->rev = FAS236;
		else if (family_code == 0x0a)
			esp->rev = FASHME; /* Version is usually '5'. */
		else
			esp->rev = FAS100A;
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	esp->max_period = (esp->max_period + 3)>>2;
	esp->min_period = (esp->min_period + 3)>>2;

	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		/* fallthrough... */

	case FAS236:
		/* Fast 236 or HME */
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Eat any bitrot in the chip */
	esp_read8(ESP_INTRPT);
	udelay(100);
}

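/* The three fields kept in esp_cmd_priv track DMA progress for a
 * command: cur_sg is the scatterlist entry currently being
 * transferred, cur_residue counts the bytes left in that entry, and
 * tot_residue counts the bytes left in the whole request.  All of the
 * data-phase bookkeeping later in this file advances these in lock
 * step.
 */
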
static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	struct scatterlist *sg = scsi_sglist(cmd);
	int dir = cmd->sc_data_direction;
	int total, i;

	if (dir == DMA_NONE)
		return;

	spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
	spriv->cur_residue = sg_dma_len(sg);
	spriv->cur_sg = sg;

	total = 0;
	for (i = 0; i < spriv->u.num_sg; i++)
		total += sg_dma_len(&sg[i]);
	spriv->tot_residue = total;
}

static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
				   struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return ent->sense_dma +
			(ent->sense_ptr - cmd->sense_buffer);
	}

	return sg_dma_address(p->cur_sg) +
		(sg_dma_len(p->cur_sg) -
		 p->cur_residue);
}

static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
				    struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return SCSI_SENSE_BUFFERSIZE -
			(ent->sense_ptr - cmd->sense_buffer);
	}
	return p->cur_residue;
}

static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int len)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr += len;
		return;
	}

	p->cur_residue -= len;
	p->tot_residue -= len;
	if (p->cur_residue < 0 || p->tot_residue < 0) {
		printk(KERN_ERR PFX "esp%d: Data transfer overflow.\n",
		       esp->host->unique_id);
		printk(KERN_ERR PFX "esp%d: cur_residue[%d] tot_residue[%d] "
		       "len[%u]\n",
		       esp->host->unique_id,
		       p->cur_residue, p->tot_residue, len);
		p->cur_residue = 0;
		p->tot_residue = 0;
	}
	if (!p->cur_residue && p->tot_residue) {
		p->cur_sg++;
		p->cur_residue = sg_dma_len(p->cur_sg);
	}
}

static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	int dir = cmd->sc_data_direction;

	if (dir == DMA_NONE)
		return;

	esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
}

static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->saved_sense_ptr = ent->sense_ptr;
		return;
	}
	ent->saved_cur_residue = spriv->cur_residue;
	ent->saved_cur_sg = spriv->cur_sg;
	ent->saved_tot_residue = spriv->tot_residue;
}

static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr = ent->saved_sense_ptr;
		return;
	}
	spriv->cur_residue = ent->saved_cur_residue;
	spriv->cur_sg = ent->saved_cur_sg;
	spriv->tot_residue = ent->saved_tot_residue;
}

static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
{
	if (cmd->cmd_len == 6 ||
	    cmd->cmd_len == 10 ||
	    cmd->cmd_len == 12) {
		esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
	} else {
		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}
}

static void esp_write_tgt_config3(struct esp *esp, int tgt)
{
	if (esp->rev > ESP100A) {
		u8 val = esp->target[tgt].esp_config3;

		if (val != esp->prev_cfg3) {
			esp->prev_cfg3 = val;
			esp_write8(val, ESP_CFG3);
		}
	}
}

static void esp_write_tgt_sync(struct esp *esp, int tgt)
{
	u8 off = esp->target[tgt].esp_offset;
	u8 per = esp->target[tgt].esp_period;

	if (off != esp->prev_soff) {
		esp->prev_soff = off;
		esp_write8(off, ESP_SOFF);
	}
	if (per != esp->prev_stp) {
		esp->prev_stp = per;
		esp_write8(per, ESP_STP);
	}
}

static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
{
	if (esp->rev == FASHME) {
		/* Arbitrary segment boundaries, 24-bit counts. */
		if (dma_len > (1U << 24))
			dma_len = (1U << 24);
	} else {
		u32 base, end;

		/* The ESP chip limits the other variants to 16 bits of
		 * transfer count.  Actually on FAS100A and FAS236 we
		 * could get 24 bits of transfer count by enabling
		 * ESP_CONFIG2_FENAB in the ESP_CFG2 register but that
		 * causes other unwanted changes so we don't use it
		 * currently.
		 */
		if (dma_len > (1U << 16))
			dma_len = (1U << 16);

		/* All of the DMA variants hooked up to these chips
		 * cannot handle crossing a 24-bit address boundary.
		 */
		base = dma_addr & ((1U << 24) - 1U);
		end = base + dma_len;
		if (end > (1U << 24))
			end = (1U << 24);
		dma_len = end - base;
	}
	return dma_len;
}
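
/* A worked example of the boundary clamp above: with dma_addr
 * 0x00fff000 and dma_len 0x2000, base is 0xfff000 and base + dma_len
 * (0x1001000) crosses the 16MB mark, so end is clamped to 0x1000000
 * and only 0x1000 bytes are programmed; the remainder goes out in a
 * later transfer.
 */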

static int esp_need_to_nego_wide(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	return spi_width(target) != tp->nego_goal_width;
}

static int esp_need_to_nego_sync(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	/* When offset is zero, period is "don't care". */
	if (!spi_offset(target) && !tp->nego_goal_offset)
		return 0;

	if (spi_offset(target) == tp->nego_goal_offset &&
	    spi_period(target) == tp->nego_goal_period)
		return 0;

	return 1;
}

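/* Tag bookkeeping below: a LUN runs either one untagged command or any
 * number of tagged ones, never both at once.  The lp->hold flag
 * "plugs" the queue while outstanding tagged commands drain so that a
 * waiting untagged command can eventually be issued.
 */
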
static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (!ent->tag[0]) {
		/* Non-tagged, slot already taken? */
		if (lp->non_tagged_cmd)
			return -EBUSY;

		if (lp->hold) {
			/* We are being held by active tagged
			 * commands.
			 */
			if (lp->num_tagged)
				return -EBUSY;

			/* Tagged commands completed, we can unplug
			 * the queue and run this untagged command.
			 */
			lp->hold = 0;
		} else if (lp->num_tagged) {
			/* Plug the queue until num_tagged decreases
			 * to zero in esp_free_lun_tag.
			 */
			lp->hold = 1;
			return -EBUSY;
		}

		lp->non_tagged_cmd = ent;
		return 0;
	} else {
		/* Tagged command, see if blocked by a
		 * non-tagged one.
		 */
		if (lp->non_tagged_cmd || lp->hold)
			return -EBUSY;
	}

	BUG_ON(lp->tagged_cmds[ent->tag[1]]);

	lp->tagged_cmds[ent->tag[1]] = ent;
	lp->num_tagged++;

	return 0;
}

static void esp_free_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (ent->tag[0]) {
		BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent);
		lp->tagged_cmds[ent->tag[1]] = NULL;
		lp->num_tagged--;
	} else {
		BUG_ON(lp->non_tagged_cmd != ent);
		lp->non_tagged_cmd = NULL;
	}
}

/* When a contingent allegiance condition is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data.  I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;

	if (!ent->sense_ptr) {
		esp_log_autosense("esp%d: Doing auto-sense for "
				  "tgt[%d] lun[%d]\n",
				  esp->host->unique_id, tgt, lun);

		ent->sense_ptr = cmd->sense_buffer;
		ent->sense_dma = esp->ops->map_single(esp,
						      ent->sense_ptr,
						      SCSI_SENSE_BUFFERSIZE,
						      DMA_FROM_DEVICE);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp->rev == FASHME)
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       val, 16, 0, ESP_CMD_DMA | ESP_CMD_SELA);
}

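/* The bytes built in the command block above are an IDENTIFY message
 * followed by a hand-rolled 6-byte REQUEST_SENSE CDB: opcode, LUN bits
 * (pre-SCSI-3 targets expect the LUN in byte 1 bits 5-7), two reserved
 * bytes, an allocation length of SCSI_SENSE_BUFFERSIZE, and a zero
 * control byte.
 */
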
static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
	struct esp_cmd_entry *ent;

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;
		struct scsi_device *dev = cmd->device;
		struct esp_lun_data *lp = dev->hostdata;

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
			return ent;
		}

		if (!scsi_populate_tag_msg(cmd, &ent->tag[0])) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
		}

		if (esp_alloc_lun_tag(ent, lp) < 0)
			continue;

		return ent;
	}

	return NULL;
}

static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];
	lp = dev->hostdata;

	list_del(&ent->list);
	list_add(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	esp_check_command_len(esp, cmd);

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate.  If the target is broken
		 * go for synchronous transfers and non-wide.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this. */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* Process it like a slow command. */
		if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
			esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

build_identify:
	/* If we don't have a lun-data struct yet, we're probing
	 * so do not disconnect.  Also, do not disconnect unless
	 * we have a tag on this command.
	 */
	if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
		*p++ = IDENTIFY(1, lun);
	else
		*p++ = IDENTIFY(0, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

	if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
		start_cmd = ESP_CMD_DMA | ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_DMA | ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	} else {
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		if (ent->tag[0]) {
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_DMA | ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	if (esp->rev == FASHME)
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       val, 16, 0, start_cmd);
}

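/* A note on the two selection strategies above: the fast path DMAs the
 * IDENTIFY, the optional tag bytes, and the whole CDB in one shot
 * using select-with-ATN (ESP_CMD_SELA) or select-with-ATN3
 * (ESP_CMD_SA3) when a tag is present.  The ESP_FLAG_DOING_SLOWCMD
 * path instead uses select-with-ATN-and-stop (ESP_CMD_SELAS) and feeds
 * the message and command bytes out phase by phase, which is what
 * odd-length CDBs, negotiation messages, and the ATN3-less ESP100
 * require.
 */
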
static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
{
	struct list_head *head = &esp->esp_cmd_pool;
	struct esp_cmd_entry *ret;

	if (list_empty(head)) {
		ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
	} else {
		ret = list_entry(head->next, struct esp_cmd_entry, list);
		list_del(&ret->list);
		memset(ret, 0, sizeof(*ret));
	}
	return ret;
}

static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}

static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int result)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = result;

	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;

		/* Restore the message/status bytes to what we actually
		 * saw originally.  Also, report that we are providing
		 * the sense data.
		 */
		cmd->result = ((DRIVER_SENSE << 24) |
			       (DID_OK << 16) |
			       (COMMAND_COMPLETE << 8) |
			       (SAM_STAT_CHECK_CONDITION << 0));

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	cmd->scsi_done(cmd);

	list_del(&ent->list);
	esp_put_ent(esp, ent);

	esp_maybe_execute_command(esp);
}

static unsigned int compose_result(unsigned int status, unsigned int message,
				   unsigned int driver_code)
{
	return (status | (message << 8) | (driver_code << 16));
}

static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_device *dev = ent->cmd->device;
	struct esp_lun_data *lp = dev->hostdata;

	scsi_track_queue_full(dev, lp->num_tagged - 1);
}

static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct scsi_device *dev = cmd->device;
	struct esp *esp = shost_priv(dev->host);
	struct esp_cmd_priv *spriv;
	struct esp_cmd_entry *ent;

	ent = esp_get_ent(esp);
	if (!ent)
		return SCSI_MLQUEUE_HOST_BUSY;

	ent->cmd = cmd;

	cmd->scsi_done = done;

	spriv = ESP_CMD_PRIV(cmd);
	spriv->u.dma_addr = ~(dma_addr_t)0x0;

	list_add_tail(&ent->list, &esp->queued_cmds);

	esp_maybe_execute_command(esp);

	return 0;
}

static int esp_check_gross_error(struct esp *esp)
{
	if (esp->sreg & ESP_STAT_SPAM) {
		/* Gross Error, could be one of:
		 * - top of fifo overwritten
		 * - top of command register overwritten
		 * - DMA programmed with wrong direction
		 * - improper phase change
		 */
		printk(KERN_ERR PFX "esp%d: Gross error sreg[%02x]\n",
		       esp->host->unique_id, esp->sreg);
		/* XXX Reset the chip. XXX */
		return 1;
	}
	return 0;
}

static int esp_check_spur_intr(struct esp *esp)
{
	switch (esp->rev) {
	case ESP100:
	case ESP100A:
		/* The interrupt pending bit of the status register cannot
		 * be trusted on these revisions.
		 */
		esp->sreg &= ~ESP_STAT_INTR;
		break;

	default:
		if (!(esp->sreg & ESP_STAT_INTR)) {
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_SR)
				return 1;

			/* If the DMA is indicating interrupt pending and the
			 * ESP is not, the only possibility is a DMA error.
			 */
			if (!esp->ops->dma_error(esp)) {
				printk(KERN_ERR PFX "esp%d: Spurious irq, "
				       "sreg=%x.\n",
				       esp->host->unique_id, esp->sreg);
				return -1;
			}

			printk(KERN_ERR PFX "esp%d: DMA error\n",
			       esp->host->unique_id);

			/* XXX Reset the chip. XXX */
			return -1;
		}
		break;
	}

	return 0;
}

static void esp_schedule_reset(struct esp *esp)
{
	esp_log_reset("ESP: esp_schedule_reset() from %p\n",
		      __builtin_return_address(0));
	esp->flags |= ESP_FLAG_RESETTING;
	esp_event(esp, ESP_EVENT_RESET);
}

/* In order to avoid having to add a special half-reconnected state
 * into the driver we just sit here and poll through the rest of
 * the reselection process to get the tag message bytes.
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	if (!lp->num_tagged) {
		printk(KERN_ERR PFX "esp%d: Reconnect w/num_tagged==0\n",
		       esp->host->unique_id);
		return NULL;
	}

	esp_log_reconnect("ESP: reconnect tag, ");

	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		printk(KERN_ERR PFX "esp%d: Reconnect IRQ1 timeout\n",
		       esp->host->unique_id);
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		printk(KERN_ERR PFX "esp%d: Reconnect, got disconnect.\n",
		       esp->host->unique_id);
		return NULL;
	}

	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		printk(KERN_ERR PFX "esp%d: Reconnect, not MIP sreg[%02x].\n",
		       esp->host->unique_id, esp->sreg);
		return NULL;
	}

	/* DMA in the tag bytes... */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message. */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		printk(KERN_ERR PFX "esp%d: Reconnect IRQ2 timeout\n",
		       esp->host->unique_id);
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		printk(KERN_ERR PFX "esp%d: Reconnect, bad tag "
		       "type %02x.\n",
		       esp->host->unique_id, esp->command_block[0]);
		return NULL;
	}

	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		printk(KERN_ERR PFX "esp%d: Reconnect, no entry for "
		       "tag %02x.\n",
		       esp->host->unique_id, esp->command_block[1]);
		return NULL;
	}

	return ent;
}

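/* On reselection, pre-HME chips report the reconnecting target as a
 * sample of the bus arbitration lines: our own ID bit and exactly one
 * other bit should be set.  esp_reconnect() below strips the host's
 * bit and uses ffs() to recover the target number.  For example, with
 * scsi_id 7 (mask 0x80) a sample of 0x84 leaves 0x04, i.e. target 2.
 */
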
static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* FASHME puts the target and lun numbers directly
		 * into the fifo.
		 */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* Older chips put the lun directly into the fifo, but
		 * the target is given as a sample of the arbitration
		 * lines on the bus at reselection time.  So we should
		 * see the ID of the ESP and the one reconnecting target
		 * set in the bitmap.
		 */
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);
			/* This chip has a bug during reselection that can
			 * cause a spurious illegal-command interrupt, which
			 * we simply ACK here.  Another possibility is a bus
			 * reset so we must check for that.
			 */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		printk(KERN_ERR PFX "esp%d: Reconnect, no lp "
		       "tgt[%u] lun[%u]\n",
		       esp->host->unique_id, target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	if (ent->flags & ESP_CMD_FLAG_ABORT) {
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}

static int esp_finish_select(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct scsi_cmnd *cmd;
	u8 orig_select_state;

	orig_select_state = esp->select_state;

	/* No longer selecting. */
	esp->select_state = ESP_SELECT_NONE;

	esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
	ent = esp->active_cmd;
	cmd = ent->cmd;

	if (esp->ops->dma_error(esp)) {
		/* If we see a DMA error during or as a result of selection,
		 * all bets are off.
		 */
		esp_schedule_reset(esp);
		esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
		return 0;
	}

	esp->ops->dma_invalidate(esp);

	if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
		struct esp_target_data *tp = &esp->target[cmd->device->id];

		/* Carefully back out of the selection attempt.  Release
		 * resources (such as DMA mapping & TAG) and reset state (such
		 * as message out and command delivery variables).
		 */
		if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
			esp_unmap_dma(esp, cmd);
			esp_free_lun_tag(ent, cmd->device->hostdata);
			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
			esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
			esp->cmd_bytes_ptr = NULL;
			esp->cmd_bytes_left = 0;
		} else {
			esp->ops->unmap_single(esp, ent->sense_dma,
					       SCSI_SENSE_BUFFERSIZE,
					       DMA_FROM_DEVICE);
			ent->sense_ptr = NULL;
		}

		/* Now that the state is unwound properly, put back onto
		 * the issue queue.  This command is no longer active.
		 */
		list_del(&ent->list);
		list_add(&ent->list, &esp->queued_cmds);
		esp->active_cmd = NULL;

		/* Return value ignored by caller, it directly invokes
		 * esp_reconnect().
		 */
		return 0;
	}

	if (esp->ireg == ESP_INTR_DC) {
		struct scsi_device *dev = cmd->device;

		/* Disconnect.  Make sure we re-negotiate sync and
		 * wide parameters if this target starts responding
		 * again in the future.
		 */
		esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;

		scsi_esp_cmd(esp, ESP_CMD_ESEL);
		esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
		return 1;
	}

	if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
		/* Selection successful.  On pre-FAST chips we have
		 * to do a NOP and possibly clean out the FIFO.
		 */
		if (esp->rev <= ESP236) {
			int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

			scsi_esp_cmd(esp, ESP_CMD_NULL);

			if (!fcnt &&
			    (!esp->prev_soff ||
			     ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
				esp_flush_fifo(esp);
		}

		/* If we are doing a slow command, negotiation, etc.
		 * we'll do the right thing as we transition to the
		 * next phase.
		 */
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		return 0;
	}

	printk("ESP: Unexpected selection completion ireg[%x].\n",
	       esp->ireg);
	esp_schedule_reset(esp);
	return 0;
}

static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;

	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* Asynchronous data transfer, always flush fifo. */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 has a chip bug where in the synchronous data
			 * phase it can mistake a final long REQ pulse from the
			 * target as an extra data byte.  Fun.
			 *
			 * To detect this case we resample the status register
			 * and fifo flags.  If we're still in a data phase and
			 * we see spurious chunks in the fifo, we return error
			 * to the caller which should reset and set things up
			 * such that we only try future transfers to this
			 * target in synchronous mode.
			 */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}

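/* The arithmetic above recovers how many bytes actually moved: the
 * transfer counter counts down from data_dma_len, so the residual
 * ecount (read back only when ESP_STAT_TCNT is clear) is subtracted,
 * and for data-out any bytes still sitting in the fifo never made it
 * to the target so they are subtracted as well (doubled when the bus
 * is wide, since the fifo flags then count 16-bit words).
 */
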
static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			if (scsi_period < 50) {
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}

static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	esp->msg_out[0] = ABORT_TASK_SET;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	if (offset > 15)
		goto do_reject;

	if (offset) {
		int rounded_up, one_clock;

		if (period > esp->max_period) {
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		one_clock = esp->ccycle / 1000;
		rounded_up = (period << 2);
		rounded_up = (rounded_up + one_clock - 1) / one_clock;
		stp = rounded_up;
		if (stp && esp->rev >= FAS236) {
			if (stp >= 50)
				stp--;
		}
	} else {
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

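/* A note on the STP arithmetic above, under the assumption (consistent
 * with the /1000 scaling) that esp->ccycle holds the chip clock period
 * in picoseconds: the SDTR period byte is in units of 4ns, so
 * period << 2 is the requested period in ns, and the rounded-up
 * division by one_clock (ns per chip clock) yields the clocks-per-byte
 * value programmed into ESP_STP.  E.g. with a 25ns (40MHz) clock and a
 * period byte of 25 (100ns), stp comes out as ceil(100 / 25) = 4.
 */
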
static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

static void esp_msgin_extended(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt = cmd->device->id;

	tp = &esp->target[tgt];
	if (esp->msg_in[2] == EXTENDED_SDTR) {
		esp_msgin_sdtr(esp, tp);
		return;
	}
	if (esp->msg_in[2] == EXTENDED_WDTR) {
		esp_msgin_wdtr(esp, tp);
		return;
	}

	printk("ESP: Unexpected extended msg type %x\n",
	       esp->msg_in[2]);

	esp->msg_out[0] = ABORT_TASK_SET;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

/* Analyze msgin bytes received from target so far.  Return non-zero
 * if there are more bytes needed to complete the message.
 */
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* Identify */
		printk("ESP: Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		if (len == 1)
			return 1;

		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg--;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}

static int esp_process_event(struct esp *esp)
{
	int write;

again:
	write = 0;
	switch (esp->event) {
	case ESP_EVENT_CHECK_PHASE:
		switch (esp->sreg & ESP_STAT_PMASK) {
		case ESP_DOP:
			esp_event(esp, ESP_EVENT_DATA_OUT);
			break;
		case ESP_DIP:
			esp_event(esp, ESP_EVENT_DATA_IN);
			break;
		case ESP_STATP:
			esp_flush_fifo(esp);
			scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
			esp_event(esp, ESP_EVENT_STATUS);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;

		case ESP_MOP:
			esp_event(esp, ESP_EVENT_MSGOUT);
			break;

		case ESP_MIP:
			esp_event(esp, ESP_EVENT_MSGIN);
			break;

		case ESP_CMDP:
			esp_event(esp, ESP_EVENT_CMD_START);
			break;

		default:
			printk("ESP: Unexpected phase, sreg=%02x\n",
			       esp->sreg);
			esp_schedule_reset(esp);
			return 0;
		}
		goto again;
		break;

	case ESP_EVENT_DATA_IN:
		write = 1;
		/* fallthru */

	case ESP_EVENT_DATA_OUT: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
		unsigned int dma_len = esp_cur_dma_len(ent, cmd);

		if (esp->rev == ESP100)
			scsi_esp_cmd(esp, ESP_CMD_NULL);

		if (write)
			ent->flags |= ESP_CMD_FLAG_WRITE;
		else
			ent->flags &= ~ESP_CMD_FLAG_WRITE;

		dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
		esp->data_dma_len = dma_len;

		if (!dma_len) {
			printk(KERN_ERR PFX "esp%d: DMA length is zero!\n",
			       esp->host->unique_id);
			printk(KERN_ERR PFX "esp%d: cur adr[%08llx] len[%08x]\n",
			       esp->host->unique_id,
			       (unsigned long long)esp_cur_dma_addr(ent, cmd),
			       esp_cur_dma_len(ent, cmd));
			esp_schedule_reset(esp);
			return 0;
		}

		esp_log_datastart("ESP: start data addr[%08llx] len[%u] "
				  "write(%d)\n",
				  (unsigned long long)dma_addr, dma_len, write);

		esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
				       write, ESP_CMD_DMA | ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_DATA_DONE);
		break;
	}
	case ESP_EVENT_DATA_DONE: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		int bytes_sent;

		if (esp->ops->dma_error(esp)) {
			printk("ESP: data done, DMA error, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		if (ent->flags & ESP_CMD_FLAG_WRITE) {
			/* XXX parity errors, etc. XXX */

			esp->ops->dma_drain(esp);
		}
		esp->ops->dma_invalidate(esp);

		if (esp->ireg != ESP_INTR_BSERV) {
			/* We should always see exactly a bus-service
			 * interrupt at the end of a successful transfer.
			 */
			printk("ESP: data done, not BSERV, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		bytes_sent = esp_data_bytes_sent(esp, ent, cmd);

		esp_log_datadone("ESP: data done flgs[%x] sent[%d]\n",
				 ent->flags, bytes_sent);

		if (bytes_sent < 0) {
			/* XXX force sync mode for this target XXX */
			esp_schedule_reset(esp);
			return 0;
		}

		esp_advance_dma(esp, ent, cmd, bytes_sent);
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
		break;
	}

	case ESP_EVENT_STATUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		if (esp->ireg & ESP_INTR_FDONE) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = esp_read8(ESP_FDATA);
			scsi_esp_cmd(esp, ESP_CMD_MOK);
		} else if (esp->ireg == ESP_INTR_BSERV) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = 0xff;
			esp_event(esp, ESP_EVENT_MSGIN);
			return 0;
		}

		if (ent->message != COMMAND_COMPLETE) {
			printk("ESP: Unexpected message %x in status\n",
			       ent->message);
			esp_schedule_reset(esp);
			return 0;
		}

		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_FREE_BUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;

		if (ent->message == COMMAND_COMPLETE ||
		    ent->message == DISCONNECT)
			scsi_esp_cmd(esp, ESP_CMD_ESEL);

		if (ent->message == COMMAND_COMPLETE) {
			esp_log_cmddone("ESP: Command done status[%x] "
					"message[%x]\n",
					ent->status, ent->message);
			if (ent->status == SAM_STAT_TASK_SET_FULL)
				esp_event_queue_full(esp, ent);

			if (ent->status == SAM_STAT_CHECK_CONDITION &&
			    !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
				ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
				esp_autosense(esp, ent);
			} else {
				esp_cmd_is_done(esp, ent, cmd,
						compose_result(ent->status,
							       ent->message,
							       DID_OK));
			}
		} else if (ent->message == DISCONNECT) {
			esp_log_disconnect("ESP: Disconnecting tgt[%d] "
					   "tag[%x:%x]\n",
					   cmd->device->id,
					   ent->tag[0], ent->tag[1]);

			esp->active_cmd = NULL;
			esp_maybe_execute_command(esp);
		} else {
			printk("ESP: Unexpected message %x in freebus\n",
			       ent->message);
			esp_schedule_reset(esp);
			return 0;
		}
		if (esp->active_cmd)
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_MSGOUT: {
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

		if (esp_debug & ESP_DEBUG_MSGOUT) {
			int i;
			printk("ESP: Sending message [ ");
			for (i = 0; i < esp->msg_out_len; i++)
				printk("%02x ", esp->msg_out[i]);
			printk("]\n");
		}

		if (esp->rev == FASHME) {
			int i;

			/* Always use the fifo. */
			for (i = 0; i < esp->msg_out_len; i++) {
				esp_write8(esp->msg_out[i], ESP_FDATA);
				esp_write8(0, ESP_FDATA);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
		} else {
			if (esp->msg_out_len == 1) {
				esp_write8(esp->msg_out[0], ESP_FDATA);
				scsi_esp_cmd(esp, ESP_CMD_TI);
			} else {
				/* Use DMA. */
				memcpy(esp->command_block,
				       esp->msg_out,
				       esp->msg_out_len);

				esp->ops->send_dma_cmd(esp,
						       esp->command_block_dma,
						       esp->msg_out_len,
						       esp->msg_out_len,
						       0,
						       ESP_CMD_DMA|ESP_CMD_TI);
			}
		}
		esp_event(esp, ESP_EVENT_MSGOUT_DONE);
		break;
	}
	case ESP_EVENT_MSGOUT_DONE:
		if (esp->rev == FASHME) {
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		} else {
			if (esp->msg_out_len > 1)
				esp->ops->dma_invalidate(esp);
		}

		if (!(esp->ireg & ESP_INTR_DC)) {
			if (esp->rev != FASHME)
				scsi_esp_cmd(esp, ESP_CMD_NULL);
		}
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	case ESP_EVENT_MSGIN:
		if (esp->ireg & ESP_INTR_BSERV) {
			if (esp->rev == FASHME) {
				if (!(esp_read8(ESP_STATUS2) &
				      ESP_STAT2_FEMPTY))
					scsi_esp_cmd(esp, ESP_CMD_FLUSH);
			} else {
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
				if (esp->rev == ESP100)
					scsi_esp_cmd(esp, ESP_CMD_NULL);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;
		}
		if (esp->ireg & ESP_INTR_FDONE) {
			u8 val;

			if (esp->rev == FASHME)
				val = esp->fifo[0];
			else
				val = esp_read8(ESP_FDATA);
			esp->msg_in[esp->msg_in_len++] = val;

			esp_log_msgin("ESP: Got msgin byte %x\n", val);

			if (!esp_msgin_process(esp))
				esp->msg_in_len = 0;

			if (esp->rev == FASHME)
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);

			scsi_esp_cmd(esp, ESP_CMD_MOK);

			if (esp->event != ESP_EVENT_FREE_BUS)
				esp_event(esp, ESP_EVENT_CHECK_PHASE);
		} else {
			printk("ESP: MSGIN neither BSERV nor FDONE, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}
		break;
	case ESP_EVENT_CMD_START:
		memcpy(esp->command_block, esp->cmd_bytes_ptr,
		       esp->cmd_bytes_left);
		if (esp->rev == FASHME)
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		esp->ops->send_dma_cmd(esp, esp->command_block_dma,
				       esp->cmd_bytes_left, 16, 0,
				       ESP_CMD_DMA | ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_CMD_DONE);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	case ESP_EVENT_CMD_DONE:
		esp->ops->dma_invalidate(esp);
		if (esp->ireg & ESP_INTR_BSERV) {
			esp_event(esp, ESP_EVENT_CHECK_PHASE);
			goto again;
		}
		esp_schedule_reset(esp);
		return 0;
		break;

	case ESP_EVENT_RESET:
		scsi_esp_cmd(esp, ESP_CMD_RS);
		break;

	default:
		printk("ESP: Unexpected event %x, resetting\n",
		       esp->event);
		esp_schedule_reset(esp);
		return 0;
		break;
	}
	return 1;
}

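/* A note on the return convention of esp_process_event() above: a
 * return of 1 tells __esp_interrupt() that processing is complete for
 * this interrupt, while 0 means "call me again" -- either the event
 * was rewritten in place (often via esp_schedule_reset(), whose
 * ESP_EVENT_RESET is then handled on the next pass) or more phase work
 * remains.
 */
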
static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;

	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, cmd->device->hostdata);
	cmd->result = DID_RESET << 16;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;
	}

	cmd->scsi_done(cmd);
	list_del(&ent->list);
	esp_put_ent(esp, ent);
}

static void esp_clear_hold(struct scsi_device *dev, void *data)
{
	struct esp_lun_data *lp = dev->hostdata;

	BUG_ON(lp->num_tagged);
	lp->hold = 0;
}

static void esp_reset_cleanup(struct esp *esp)
{
	struct esp_cmd_entry *ent, *tmp;
	int i;

	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;

		list_del(&ent->list);
		cmd->result = DID_RESET << 16;
		cmd->scsi_done(cmd);
		esp_put_ent(esp, ent);
	}

	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
		if (ent == esp->active_cmd)
			esp->active_cmd = NULL;
		esp_reset_cleanup_one(esp, ent);
	}

	BUG_ON(esp->active_cmd != NULL);

	/* Force renegotiation of sync/wide transfers. */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		struct esp_target_data *tp = &esp->target[i];

		tp->esp_period = 0;
		tp->esp_offset = 0;
		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
				     ESP_CONFIG3_FSCSI |
				     ESP_CONFIG3_FAST);
		tp->flags &= ~ESP_TGT_WIDE;
		tp->flags |= ESP_TGT_CHECK_NEGO;

		if (tp->starget)
			starget_for_each_device(tp->starget, NULL,
						esp_clear_hold);
	}
	esp->flags &= ~ESP_FLAG_RESETTING;
}

/* Runs under host->lock */
static void __esp_interrupt(struct esp *esp)
{
	int finish_reset, intr_done;
	u8 phase;

	esp->sreg = esp_read8(ESP_STATUS);

	if (esp->flags & ESP_FLAG_RESETTING) {
		finish_reset = 1;
	} else {
		if (esp_check_gross_error(esp))
			return;

		finish_reset = esp_check_spur_intr(esp);
		if (finish_reset < 0)
			return;
	}

	esp->ireg = esp_read8(ESP_INTRPT);

	if (esp->ireg & ESP_INTR_SR)
		finish_reset = 1;

	if (finish_reset) {
		esp_reset_cleanup(esp);
		if (esp->eh_reset) {
			complete(esp->eh_reset);
			esp->eh_reset = NULL;
		}
		return;
	}

	phase = (esp->sreg & ESP_STAT_PMASK);
	if (esp->rev == FASHME) {
		if (((phase != ESP_DIP && phase != ESP_DOP) &&
		     esp->select_state == ESP_SELECT_NONE &&
		     esp->event != ESP_EVENT_STATUS &&
		     esp->event != ESP_EVENT_DATA_DONE) ||
		    (esp->ireg & ESP_INTR_RSEL)) {
			esp->sreg2 = esp_read8(ESP_STATUS2);
			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
			    (esp->sreg2 & ESP_STAT2_F1BYTE))
				hme_read_fifo(esp);
		}
	}

	esp_log_intr("ESP: intr sreg[%02x] seqreg[%02x] "
		     "sreg2[%02x] ireg[%02x]\n",
		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);

	intr_done = 0;

	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
		printk("ESP: unexpected IREG %02x\n", esp->ireg);
		if (esp->ireg & ESP_INTR_IC)
			esp_dump_cmd_log(esp);

		esp_schedule_reset(esp);
	} else {
		if (!(esp->ireg & ESP_INTR_RSEL)) {
			/* Some combination of FDONE, BSERV, DC. */
			if (esp->select_state != ESP_SELECT_NONE)
				intr_done = esp_finish_select(esp);
		} else if (esp->ireg & ESP_INTR_RSEL) {
			if (esp->active_cmd)
				(void) esp_finish_select(esp);
			intr_done = esp_reconnect(esp);
		}
	}
	while (!intr_done)
		intr_done = esp_process_event(esp);
}

irqreturn_t scsi_esp_intr(int irq, void *dev_id)
{
	struct esp *esp = dev_id;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(esp->host->host_lock, flags);
	ret = IRQ_NONE;
	if (esp->ops->irq_pending(esp)) {
		ret = IRQ_HANDLED;
		for (;;) {
			int i;

			__esp_interrupt(esp);
			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
				break;
			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;

			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
				if (esp->ops->irq_pending(esp))
					break;
			}
			if (i == ESP_QUICKIRQ_LIMIT)
				break;
		}
	}
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	return ret;
}
EXPORT_SYMBOL(scsi_esp_intr);

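/* Identify the chip variant by writing to progressively newer config
 * registers and reading the values back: a plain ESP100 implements
 * neither CFG2 nor CFG3, an ESP100A implements CFG2 only, and the
 * ESP236/FAS parts implement all three.  The clock conversion factor
 * then distinguishes a FAST part from an ESP236.
 */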
static void __devinit esp_get_revision(struct esp *esp)
{
	u8 val;

	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
	esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
	esp_write8(esp->config2, ESP_CFG2);

	val = esp_read8(ESP_CFG2);
	val &= ~ESP_CONFIG2_MAGIC;
	if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
		/* If what we write to cfg2 does not come back, cfg2 is not
		 * implemented, therefore this must be a plain esp100.
		 */
		esp->rev = ESP100;
	} else {
		esp->config2 = 0;
		esp_set_all_config3(esp, 5);
		esp->prev_cfg3 = 5;
		esp_write8(esp->config2, ESP_CFG2);
		esp_write8(0, ESP_CFG3);
		esp_write8(esp->prev_cfg3, ESP_CFG3);

		val = esp_read8(ESP_CFG3);
		if (val != 5) {
			/* The cfg2 register is implemented, however
			 * cfg3 is not, must be esp100a.
			 */
			esp->rev = ESP100A;
		} else {
			esp_set_all_config3(esp, 0);
			esp->prev_cfg3 = 0;
			esp_write8(esp->prev_cfg3, ESP_CFG3);

			/* All of cfg{1,2,3} implemented, must be one of
			 * the fas variants, figure out which one.
			 */
			if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
				esp->rev = FAST;
				esp->sync_defp = SYNC_DEFP_FAST;
			} else {
				esp->rev = ESP236;
			}
			esp->config2 = 0;
			esp_write8(esp->config2, ESP_CFG2);
		}
	}
}

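/* Set up the driver's software state before the first hardware reset. */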
static void __devinit esp_init_swstate(struct esp *esp)
{
	int i;

	INIT_LIST_HEAD(&esp->queued_cmds);
	INIT_LIST_HEAD(&esp->active_cmds);
	INIT_LIST_HEAD(&esp->esp_cmd_pool);

	/* Start with a clear state, domain validation (via ->slave_configure,
	 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
	 * commands.
	 */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		esp->target[i].flags = 0;
		esp->target[i].nego_goal_period = 0;
		esp->target[i].nego_goal_offset = 0;
		esp->target[i].nego_goal_width = 0;
		esp->target[i].nego_goal_tags = 0;
	}
}

/* This places the ESP into a known state at boot time. */
static void esp_bootup_reset(struct esp *esp)
{
	u8 val;

	/* Reset the DMA */
	esp->ops->reset_dma(esp);

	/* Reset the ESP */
	esp_reset_esp(esp);

	/* Reset the SCSI bus, but tell ESP not to generate an irq */
	val = esp_read8(ESP_CFG1);
	val |= ESP_CONFIG1_SRRDISAB;
	esp_write8(val, ESP_CFG1);

	scsi_esp_cmd(esp, ESP_CMD_RS);
	udelay(400);

	esp_write8(esp->config1, ESP_CFG1);

	/* Eat any bitrot in the chip and we are done... */
	esp_read8(ESP_INTRPT);
}

static void __devinit esp_set_clock_params(struct esp *esp)
{
	int fmhz;
	u8 ccf;

	/* This is getting messy but it has to be done correctly or else
	 * you get weird behavior all over the place.  We are trying to
	 * basically figure out three pieces of information.
	 *
	 * a) Clock Conversion Factor
	 *
	 * This is a representation of the input crystal clock frequency
	 * going into the ESP on this machine.  Any operation whose timing
	 * is longer than 400ns depends on this value being correct.  For
	 * example, you'll get blips for arbitration/selection during high
	 * load or with multiple targets if this is not set correctly.
	 *
	 * b) Selection Time-Out
	 *
	 * The ESP isn't very bright and will arbitrate for the bus and try
	 * to select a target forever if you let it.  This value tells the
	 * ESP when it has taken too long to negotiate and that it should
	 * interrupt the CPU so we can see what happened.  The value is
	 * computed as follows (from NCR/Symbios chip docs).
	 *
	 *          (Time Out Period) * (Input Clock)
	 *   STO = ----------------------------------
	 *         (8192) * (Clock Conversion Factor)
	 *
	 * We use a time out period of 250ms (ESP_BUS_TIMEOUT).
	 *
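	 * For example, with a 20MHz input clock and a clock conversion
	 * factor of 4 this works out to:
	 *
	 *   STO = (0.25 * 20,000,000) / (8192 * 4) ~= 152
	 *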
	 * c) Empirical constants for synchronous offset and transfer period
	 *    register values
	 *
	 * This entails the smallest and largest sync period we could ever
	 * handle on this ESP.
	 */
	fmhz = esp->cfreq;

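	/* The "+ 4" makes the integer division round up, so this is
	 * effectively ccf = ceil(MHz / 5): 20MHz -> 4, 25MHz -> 5,
	 * 40MHz -> 8.
	 */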
	ccf = ((fmhz / 1000000) + 4) / 5;
	if (ccf == 1)
		ccf = 2;

	/* If we can't find anything reasonable, just assume 20MHz.
	 * This is the clock frequency of the older sun4c's where I've
	 * been unable to find the clock-frequency PROM property.  All
	 * other machines provide useful values it seems.
	 */
	if (fmhz <= 5000000 || ccf < 1 || ccf > 8) {
		fmhz = 20000000;
		ccf = 4;
	}

	esp->cfact = (ccf == 8 ? 0 : ccf);
	esp->cfreq = fmhz;
	esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz);
	esp->ctick = ESP_TICK(ccf, esp->ccycle);
	esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf);
	esp->sync_defp = SYNC_DEFP_SLOW;
}

static const char *esp_chip_names[] = {
	"ESP100",
	"ESP100A",
	"ESP236",
	"FAS236",
	"FAS100A",
	"FAST",
	"FASHME",
};

static struct scsi_transport_template *esp_transport_template;

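/* Register a fully set up ESP with the SCSI midlayer.  The bus glue
 * fills in esp->ops, the register mappings, and the clock and SCSI ID
 * properties before calling this; we then probe the chip revision,
 * reset it, wait for the bus to settle, and hand the host off to
 * scsi_add_host().
 */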
int __devinit scsi_esp_register(struct esp *esp, struct device *dev)
{
	static int instance;
	int err;

	esp->host->transportt = esp_transport_template;
	esp->host->max_lun = ESP_MAX_LUN;
	esp->host->cmd_per_lun = 2;

	esp_set_clock_params(esp);

	esp_get_revision(esp);

	esp_init_swstate(esp);

	esp_bootup_reset(esp);

	esp->host->unique_id = instance;

	printk(KERN_INFO PFX "esp%u, regs[%1p:%1p] irq[%u]\n",
	       esp->host->unique_id, esp->regs, esp->dma_regs,
	       esp->host->irq);
	printk(KERN_INFO PFX "esp%u is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
	       esp->host->unique_id, esp_chip_names[esp->rev],
	       esp->cfreq / 1000000, esp->cfact, esp->scsi_id);

	/* Let the SCSI bus reset settle. */
	ssleep(esp_bus_reset_settle);

	err = scsi_add_host(esp->host, dev);
	if (err)
		return err;

	instance++;

	scsi_scan_host(esp->host);

	return 0;
}
EXPORT_SYMBOL(scsi_esp_register);

void __devexit scsi_esp_unregister(struct esp *esp)
{
	scsi_remove_host(esp->host);
}
EXPORT_SYMBOL(scsi_esp_unregister);

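/* Allocate per-LUN driver state and publish our transfer limits to
 * the SPI transport class so that domain validation knows what to
 * attempt; an offset of 15 is the deepest the chip will take.
 */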
static int esp_slave_alloc(struct scsi_device *dev)
{
	struct esp *esp = shost_priv(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];
	struct esp_lun_data *lp;

	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
	if (!lp)
		return -ENOMEM;
	dev->hostdata = lp;

	tp->starget = dev->sdev_target;

	spi_min_period(tp->starget) = esp->min_period;
	spi_max_offset(tp->starget) = 15;

	if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
		spi_max_width(tp->starget) = 1;
	else
		spi_max_width(tp->starget) = 0;

	return 0;
}

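/* Choose a tag depth and kick off domain validation once the device
 * is fully attached.  Tagged queueing is only enabled when the device
 * claims to support it.
 */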
static int esp_slave_configure(struct scsi_device *dev)
{
	struct esp *esp = shost_priv(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];
	int goal_tags, queue_depth;

	goal_tags = 0;

	if (dev->tagged_supported) {
		/* XXX make this configurable somehow XXX */
		goal_tags = ESP_DEFAULT_TAGS;

		if (goal_tags > ESP_MAX_TAG)
			goal_tags = ESP_MAX_TAG;
	}

	queue_depth = goal_tags;
	if (queue_depth < dev->host->cmd_per_lun)
		queue_depth = dev->host->cmd_per_lun;

	if (goal_tags) {
		scsi_set_tag_type(dev, MSG_ORDERED_TAG);
		scsi_activate_tcq(dev, queue_depth);
	} else {
		scsi_deactivate_tcq(dev, queue_depth);
	}
	tp->flags |= ESP_TGT_DISCONNECT;

	if (!spi_initial_dv(dev->sdev_target))
		spi_dv_device(dev);

	return 0;
}

static void esp_slave_destroy(struct scsi_device *dev)
{
	struct esp_lun_data *lp = dev->hostdata;

	kfree(lp);
	dev->hostdata = NULL;
}

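/* Abort a single command.  Three cases: the command is still on the
 * queued list and can simply be completed with DID_ABORT; it is the
 * currently active command, in which case we queue an ABORT TASK SET
 * message, assert ATN, and wait up to five seconds for the message to
 * be taken; or it belongs to a disconnected nexus, which we cannot
 * reach, so we fail and let the midlayer escalate.
 */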
static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct esp_cmd_entry *ent, *tmp;
	struct completion eh_done;
	unsigned long flags;

	/* XXX This helps a lot with debugging but might be a bit
	 * XXX much for the final driver.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	printk(KERN_ERR PFX "esp%d: Aborting command [%p:%02x]\n",
	       esp->host->unique_id, cmd, cmd->cmnd[0]);
	ent = esp->active_cmd;
	if (ent)
		printk(KERN_ERR PFX "esp%d: Current command [%p:%02x]\n",
		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
	list_for_each_entry(ent, &esp->queued_cmds, list) {
		printk(KERN_ERR PFX "esp%d: Queued command [%p:%02x]\n",
		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
	}
	list_for_each_entry(ent, &esp->active_cmds, list) {
		printk(KERN_ERR PFX "esp%d: Active command [%p:%02x]\n",
		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
	}
	esp_dump_cmd_log(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	spin_lock_irqsave(esp->host->host_lock, flags);

	ent = NULL;
	list_for_each_entry(tmp, &esp->queued_cmds, list) {
		if (tmp->cmd == cmd) {
			ent = tmp;
			break;
		}
	}

	if (ent) {
		/* Easiest case, we didn't even issue the command
		 * yet so it is trivial to abort.
		 */
		list_del(&ent->list);

		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);

		esp_put_ent(esp, ent);

		goto out_success;
	}

	init_completion(&eh_done);

	ent = esp->active_cmd;
	if (ent && ent->cmd == cmd) {
		/* Command is the currently active command on
		 * the bus.  If we already have an output message
		 * pending, no dice.
		 */
		if (esp->msg_out_len)
			goto out_failure;

		/* Send out an abort, encouraging the target to
		 * go to MSGOUT phase by asserting ATN.
		 */
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		ent->eh_done = &eh_done;

		scsi_esp_cmd(esp, ESP_CMD_SATN);
	} else {
		/* The command is disconnected.  This is not easy to
		 * abort.  For now we fail and let the scsi error
		 * handling layer go try a scsi bus reset or host
		 * reset.
		 *
		 * What we could do is put together a scsi command
		 * solely for the purpose of sending an abort message
		 * to the target.  Coming up with all the code to
		 * cook up scsi commands, special case them everywhere,
		 * etc. is for questionable gain and it would be better
		 * if the generic scsi error handling layer could do at
		 * least some of that for us.
		 *
		 * Anyway, this is an area for potential future improvement
		 * in this driver.
		 */
		goto out_failure;
	}

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		ent->eh_done = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;

out_success:
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return SUCCESS;

out_failure:
	/* XXX This might be a good location to set ESP_TGT_BROKEN
	 * XXX since we know which target/lun in particular is
	 * XXX causing trouble.
	 */
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return FAILED;
}

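/* Reset the SCSI bus.  We flag the reset as in progress, issue the
 * chip's bus reset command, and rely on the interrupt handler to run
 * esp_reset_cleanup() and complete eh_reset once the reset interrupt
 * arrives.  If that does not happen within five seconds of the settle
 * delay, give up and report failure.
 */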
static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct completion eh_reset;
	unsigned long flags;

	init_completion(&eh_reset);

	spin_lock_irqsave(esp->host->host_lock, flags);

	esp->eh_reset = &eh_reset;

	/* XXX This is too simple... We should add lots of
	 * XXX checks here so that if we find that the chip is
	 * XXX very wedged we return failure immediately so
	 * XXX that we can perform a full chip reset.
	 */
	esp->flags |= ESP_FLAG_RESETTING;
	scsi_esp_cmd(esp, ESP_CMD_RS);

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	ssleep(esp_bus_reset_settle);

	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		esp->eh_reset = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;
}

/* All bets are off, reset the entire device. */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	unsigned long flags;

	spin_lock_irqsave(esp->host->host_lock, flags);
	esp_bootup_reset(esp);
	esp_reset_cleanup(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	ssleep(esp_bus_reset_settle);

	return SUCCESS;
}

static const char *esp_info(struct Scsi_Host *host)
{
	return "esp";
}

struct scsi_host_template scsi_esp_template = {
	.module			= THIS_MODULE,
	.name			= "esp",
	.info			= esp_info,
	.queuecommand		= esp_queuecommand,
	.slave_alloc		= esp_slave_alloc,
	.slave_configure	= esp_slave_configure,
	.slave_destroy		= esp_slave_destroy,
	.eh_abort_handler	= esp_eh_abort_handler,
	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
	.eh_host_reset_handler	= esp_eh_host_reset_handler,
	.can_queue		= 7,
	.this_id		= 7,
	.sg_tablesize		= SG_ALL,
	.use_clustering		= ENABLE_CLUSTERING,
	.max_sectors		= 0xffff,
	.skip_settle_delay	= 1,
};
EXPORT_SYMBOL(scsi_esp_template);

static void esp_get_signalling(struct Scsi_Host *host)
{
	struct esp *esp = shost_priv(host);
	enum spi_signal_type type;

	if (esp->flags & ESP_FLAG_DIFFERENTIAL)
		type = SPI_SIGNAL_HVD;
	else
		type = SPI_SIGNAL_SE;

	spi_signalling(host) = type;
}

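/* SPI transport class hooks.  These don't touch the chip directly;
 * they only record the new negotiation goal and set
 * ESP_TGT_CHECK_NEGO so that the next command issued to the target
 * renegotiates the transfer agreement.
 */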
static void esp_set_offset(struct scsi_target *target, int offset)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	tp->nego_goal_offset = offset;
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static void esp_set_period(struct scsi_target *target, int period)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	tp->nego_goal_period = period;
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static void esp_set_width(struct scsi_target *target, int width)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	tp->nego_goal_width = (width ? 1 : 0);
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static struct spi_function_template esp_transport_ops = {
	.set_offset		= esp_set_offset,
	.show_offset		= 1,
	.set_period		= esp_set_period,
	.show_period		= 1,
	.set_width		= esp_set_width,
	.show_width		= 1,
	.get_signalling		= esp_get_signalling,
};

static int __init esp_init(void)
{
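	/* The driver keeps its per-command private state in the
	 * scsi_pointer area of struct scsi_cmnd, so that state must
	 * never outgrow the space the midlayer provides.
	 */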
	BUILD_BUG_ON(sizeof(struct scsi_pointer) <
		     sizeof(struct esp_cmd_priv));

	esp_transport_template = spi_attach_transport(&esp_transport_ops);
	if (!esp_transport_template)
		return -ENODEV;

	return 0;
}

static void __exit esp_exit(void)
{
	spi_release_transport(esp_transport_template);
}

MODULE_DESCRIPTION("ESP SCSI driver core");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(esp_bus_reset_settle, int, 0);
MODULE_PARM_DESC(esp_bus_reset_settle,
		 "ESP scsi bus reset delay in seconds");

module_param(esp_debug, int, 0);
MODULE_PARM_DESC(esp_debug,
"ESP bitmapped debugging message enable value:\n"
"	0x00000001	Log interrupt events\n"
"	0x00000002	Log scsi commands\n"
"	0x00000004	Log resets\n"
"	0x00000008	Log message in events\n"
"	0x00000010	Log message out events\n"
"	0x00000020	Log command completion\n"
"	0x00000040	Log disconnects\n"
"	0x00000080	Log data start\n"
"	0x00000100	Log data done\n"
"	0x00000200	Log reconnects\n"
"	0x00000400	Log auto-sense data\n"
);

module_init(esp_init);
module_exit(esp_exit);